From c79f9f2e32e3d40b844f17c259fadc7448ecdcb6 Mon Sep 17 00:00:00 2001 From: MisterY52 <11368116+MisterY52@users.noreply.github.com> Date: Sun, 19 May 2024 18:18:24 +0200 Subject: [PATCH] WIP: dtb fix Updated memflow and memory class in order to fix the dtb value of protected processes. --- apex_dma/Makefile | 5 +- apex_dma/apex_dma.cpp | 3 +- apex_dma/build.sh | 15 +- apex_dma/memflow_lib/.cargo/config | 2 + .../memflow_lib/.github/workflows/build.yml | 176 + apex_dma/memflow_lib/.gitignore | 13 + apex_dma/memflow_lib/CHANGES.md | 42 + apex_dma/memflow_lib/CONTRIBUTE.md | 2 +- apex_dma/memflow_lib/Cargo.toml | 11 +- apex_dma/memflow_lib/LICENSE | 4 +- apex_dma/memflow_lib/README.md | 152 +- apex_dma/memflow_lib/RELEASE.md | 21 + apex_dma/memflow_lib/memflow-bench/Cargo.toml | 27 +- .../memflow-bench/benches/batcher.rs | 75 +- .../memflow-bench/benches/read_dummy.rs | 29 +- .../memflow-bench/benches/read_win32.rs | 84 +- apex_dma/memflow_lib/memflow-bench/src/lib.rs | 1 + .../memflow_lib/memflow-bench/src/phys.rs | 85 +- .../memflow_lib/memflow-bench/src/util.rs | 79 + apex_dma/memflow_lib/memflow-bench/src/vat.rs | 227 +- .../memflow_lib/memflow-bench/src/virt.rs | 252 +- .../memflow-bench/vagrant/Vagrantfile | 39 + .../memflow_lib/memflow-derive/Cargo.toml | 15 +- .../memflow_lib/memflow-derive/src/lib.rs | 625 ++- .../memflow-derive/tests/derive_test.rs | 38 - apex_dma/memflow_lib/memflow-ffi/Cargo.toml | 10 +- apex_dma/memflow_lib/memflow-ffi/README.md | 2 +- apex_dma/memflow_lib/memflow-ffi/bindgen.sh | 26 +- .../memflow_lib/memflow-ffi/cbindgen.toml | 15 +- apex_dma/memflow_lib/memflow-ffi/cglue.toml | 3 + .../memflow_lib/memflow-ffi/examples/Makefile | 19 - .../memflow-ffi/examples/c/.clang-format | 2 + .../memflow-ffi/examples/c/Makefile | 31 + .../memflow-ffi/examples/c/find_process.c | 210 + .../memflow-ffi/examples/c/module_dump.c | 126 + .../memflow-ffi/examples/c/module_list.c | 80 + .../memflow-ffi/examples/c/phys_mem.c | 46 + 
.../memflow-ffi/examples/c/process_list.c | 125 + .../memflow-ffi/examples/cpp/Makefile | 19 + .../memflow-ffi/examples/cpp/plist.cpp | 104 + .../memflow-ffi/examples/phys_mem.c | 35 - apex_dma/memflow_lib/memflow-ffi/memflow.h | 3740 ++++++++++++++-- apex_dma/memflow_lib/memflow-ffi/memflow.hpp | 3808 +++++++++++++++++ .../memflow_lib/memflow-ffi/memflow_cpp.h | 177 - .../memflow_lib/memflow-ffi/memflow_go.yml | 43 + .../memflow-ffi/src/connectors/mod.rs | 150 - apex_dma/memflow_lib/memflow-ffi/src/lib.rs | 6 +- apex_dma/memflow_lib/memflow-ffi/src/log.rs | 140 +- .../memflow_lib/memflow-ffi/src/mem/mod.rs | 6 +- .../memflow-ffi/src/mem/phys_mem.rs | 146 - .../memflow-ffi/src/mem/virt_mem.rs | 117 - .../memflow_lib/memflow-ffi/src/os/mod.rs | 4 + .../memflow-ffi/src/plugins/mod.rs | 215 + .../memflow_lib/memflow-ffi/src/process.rs | 119 - apex_dma/memflow_lib/memflow-ffi/src/util.rs | 34 - apex_dma/memflow_lib/memflow-ffi/src/win32.rs | 65 - .../memflow_lib/memflow-ffi/verify_headers.sh | 12 + .../memflow-qemu-procfs/.gitignore | 5 - .../memflow-qemu-procfs/Cargo.toml | 37 - .../memflow_lib/memflow-qemu-procfs/Makefile | 24 - .../memflow_lib/memflow-qemu-procfs/README.md | 55 - .../memflow-qemu-procfs/examples/read_phys.rs | 45 - .../memflow-qemu-procfs/install.sh | 19 - .../memflow-qemu-procfs/src/lib.rs | 475 -- .../memflow_lib/memflow-win32-ffi/.gitignore | 6 - .../memflow_lib/memflow-win32-ffi/Cargo.toml | 26 - .../memflow_lib/memflow-win32-ffi/README.md | 47 - .../memflow_lib/memflow-win32-ffi/bindgen.sh | 3 - .../memflow-win32-ffi/cbindgen.toml | 22 - .../memflow-win32-ffi/examples/Makefile | 22 - .../memflow-win32-ffi/examples/dump_header.c | 61 - .../memflow-win32-ffi/examples/process_list.c | 54 - .../memflow-win32-ffi/memflow_win32.h | 321 -- .../memflow-win32-ffi/memflow_win32_cpp.h | 151 - .../memflow-win32-ffi/src/kernel/mod.rs | 1 - .../src/kernel/start_block.rs | 17 - .../memflow_lib/memflow-win32-ffi/src/lib.rs | 2 - 
.../memflow-win32-ffi/src/win32/kernel.rs | 331 -- .../memflow-win32-ffi/src/win32/mod.rs | 4 - .../memflow-win32-ffi/src/win32/module.rs | 24 - .../memflow-win32-ffi/src/win32/process.rs | 136 - .../src/win32/process_info.rs | 81 - .../.github/workflows/binary-build.yml | 43 + .../memflow-win32/.github/workflows/build.yml | 115 + apex_dma/memflow_lib/memflow-win32/.gitignore | 10 + apex_dma/memflow_lib/memflow-win32/Cargo.toml | 90 +- .../LICENSE | 7 +- apex_dma/memflow_lib/memflow-win32/README.md | 36 +- apex_dma/memflow_lib/memflow-win32/build.rs | 50 - .../memflow-win32/examples/dump_offsets.rs | 110 - .../memflow-win32/examples/integration.rs | 211 - .../memflow-win32/examples/multithreading.rs | 136 - .../memflow-win32/examples/read_bench.rs | 216 - .../memflow-win32/examples/read_keys.rs | 69 - apex_dma/memflow_lib/memflow-win32/install.sh | 19 + .../memflow-win32-defs/Cargo.toml | 42 + .../examples/generate_offsets.rs | 76 +- .../src/kernel.rs} | 14 +- .../memflow-win32-defs/src/lib.rs | 5 + .../src/offsets/builder.rs | 130 +- .../memflow-win32-defs/src/offsets/mod.rs | 489 +++ .../src/offsets/offset_table.rs | 97 +- .../src/offsets/pdb.rs} | 51 +- .../src/offsets/pdb}/data.rs | 43 +- .../src/offsets/symstore.rs | 71 +- .../memflow-win32/memflow-win32/Cargo.toml | 70 + .../memflow-win32/memflow-win32/build.rs | 168 + .../memflow-win32/examples/dump_offsets.rs | 120 + .../memflow-win32/examples/open_process.rs | 107 + .../memflow-win32/examples/process_list.rs | 97 + ...X64_0AFB69F5FD264D54673570E37B38A3181.toml | 18 +- ...X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml | 34 + ...X64_9C00B19DBDE003DBFE4AB4216993C8431.toml | 34 + ...X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml | 18 +- ...X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml | 18 +- ...64_5F0CF5D532F385333A9B4ABA25CA65961.toml} | 16 +- ...X86_55678BC384F099B6ED05E9E39046924A1.toml | 34 + .../memflow-win32/offsets/3_10_511_X86.toml | 35 + .../offsets/4_0_1381_X86.toml | 12 + 
...X64_82DCF67A38274C9CA99B60B421D2786D2.toml | 34 + ...X64_ECE191A20CFF4465AE46DF96C22638451.toml | 18 +- ...X86_684DA42A30CC450F81C535B4D18944B12.toml | 18 +- .../memflow-win32/src/kernel/mod.rs | 6 + .../{ => memflow-win32}/src/kernel/ntos.rs | 93 +- .../memflow-win32/src/kernel/ntos/pehelper.rs | 64 + .../memflow-win32/src/kernel/ntos/x64.rs | 108 + .../src/kernel/ntos/x86.rs | 31 +- .../memflow-win32/src/kernel/start_block.rs | 94 + .../src/kernel/start_block/aarch64.rs | 62 + .../src/kernel/start_block/x64.rs | 31 +- .../src/kernel/start_block/x86.rs | 15 +- .../src/kernel/start_block/x86pae.rs | 16 +- .../{ => memflow-win32}/src/kernel/sysproc.rs | 50 +- .../{ => memflow-win32}/src/lib.rs | 7 +- .../memflow-win32/src/offsets/mod.rs | 40 + .../memflow-win32/src/plugins.rs | 155 + .../{ => memflow-win32}/src/win32.rs | 6 +- .../memflow-win32/src/win32/kernel.rs | 714 ++++ .../memflow-win32/src/win32/kernel/mem_map.rs | 66 + .../src/win32/kernel_builder.rs | 167 +- .../src/win32/kernel_info.rs | 79 +- .../memflow-win32/src/win32/keyboard.rs | 339 ++ .../memflow-win32/src/win32/module.rs | 152 + .../memflow-win32/src/win32/process.rs | 501 +++ .../src/win32/unicode_string.rs | 52 +- .../memflow-win32/src/win32/vat.rs | 65 + ...X64_82DCF67A38274C9CA99B60B421D2786D2.toml | 21 - .../memflow_lib/memflow-win32/src/error.rs | 126 - .../memflow-win32/src/kernel/ntos/pehelper.rs | 57 - .../memflow-win32/src/kernel/ntos/x64.rs | 106 - .../memflow-win32/src/kernel/start_block.rs | 75 - .../memflow-win32/src/offsets/mod.rs | 330 -- .../memflow-win32/src/win32/kernel.rs | 530 --- .../memflow-win32/src/win32/keyboard.rs | 210 - .../memflow-win32/src/win32/module.rs | 37 - .../memflow-win32/src/win32/process.rs | 394 -- .../memflow-win32/src/win32/vat.rs | 56 - apex_dma/memflow_lib/memflow/Cargo.toml | 120 +- .../memflow/examples/cached_view.rs | 296 ++ .../memflow/examples/integration.rs | 181 + .../memflow/examples/kernel_exports.rs | 81 + 
.../memflow/examples/kernel_maps.rs | 85 + .../memflow/examples/kernel_modules.rs | 87 + .../memflow_lib/memflow/examples/keyboard.rs | 84 + .../memflow_lib/memflow/examples/mem_maps.rs | 108 + .../memflow/examples/module_info.rs | 115 + .../memflow/examples/module_list.rs | 111 + .../memflow/examples/multithreading.rs | 160 + .../memflow/examples/open_process.rs | 159 + .../memflow/examples/process_list.rs | 87 + .../memflow/examples/read_bench.rs | 218 + .../memflow/examples/target_list.rs | 59 + .../memflow/src/architecture/arm/aarch64.rs | 48 + .../memflow/src/architecture/arm/mod.rs | 167 + .../memflow/src/architecture/mmu_spec.rs | 526 --- .../architecture/mmu_spec/translate_data.rs | 245 -- .../memflow/src/architecture/mod.rs | 129 +- .../memflow/src/architecture/x86/mod.rs | 101 +- .../memflow/src/architecture/x86/x32.rs | 45 +- .../memflow/src/architecture/x86/x32_pae.rs | 45 +- .../memflow/src/architecture/x86/x64.rs | 140 +- .../memflow_lib/memflow/src/connector/args.rs | 183 - .../memflow/src/connector/cpu_state.rs | 38 + .../memflow/src/connector/fileio.rs | 231 +- .../memflow/src/connector/filemap.rs | 73 +- .../memflow/src/connector/inventory.rs | 421 -- .../memflow_lib/memflow/src/connector/mmap.rs | 131 +- .../memflow_lib/memflow/src/connector/mod.rs | 25 +- apex_dma/memflow_lib/memflow/src/dummy/mem.rs | 125 + apex_dma/memflow_lib/memflow/src/dummy/mod.rs | 10 + .../memflow/src/dummy/offset_pt.rs | 261 ++ apex_dma/memflow_lib/memflow/src/dummy/os.rs | 648 +++ .../memflow_lib/memflow/src/dummy/process.rs | 215 + apex_dma/memflow_lib/memflow/src/error.rs | 542 ++- .../src/iter/double_buffered_iterator.rs | 2 +- apex_dma/memflow_lib/memflow/src/iter/mod.rs | 62 +- .../memflow/src/iter/page_chunks.rs | 421 +- apex_dma/memflow_lib/memflow/src/iter/void.rs | 32 - apex_dma/memflow_lib/memflow/src/lib.rs | 288 +- apex_dma/memflow_lib/memflow/src/mem/dummy.rs | 458 -- .../memflow_lib/memflow/src/mem/mem_data.rs | 131 + 
.../memflow_lib/memflow/src/mem/mem_map.rs | 378 +- .../src/mem/memory_view/arch_overlay.rs | 53 + .../memflow/src/mem/memory_view/batcher.rs | 390 ++ .../src/mem/memory_view/cached_view.rs | 373 ++ .../memflow/src/mem/memory_view/cursor.rs | 508 +++ .../memflow/src/mem/memory_view/mod.rs | 632 +++ .../memflow/src/mem/memory_view/remap_view.rs | 82 + apex_dma/memflow_lib/memflow/src/mem/mod.rs | 57 +- .../memflow_lib/memflow/src/mem/phys_mem.rs | 249 -- .../middleware/cache/mod.rs} | 245 +- .../middleware}/cache/page_cache.rs | 299 +- .../src/mem/phys_mem/middleware/delay.rs | 223 + .../src/mem/phys_mem/middleware/metrics.rs | 336 ++ .../src/mem/phys_mem/middleware/mod.rs | 17 + .../memflow/src/mem/phys_mem/mod.rs | 337 ++ .../memflow/src/mem/phys_mem_batcher.rs | 93 - .../memflow_lib/memflow/src/mem/virt_mem.rs | 262 -- .../memflow/src/mem/virt_mem/mod.rs | 4 + .../memflow/src/mem/virt_mem/virtual_dma.rs | 318 +- .../memflow/src/mem/virt_mem_batcher.rs | 77 - .../memflow/src/mem/virt_translate.rs | 139 - .../cache/mod.rs} | 192 +- .../{ => virt_translate}/cache/tlb_cache.rs | 46 +- .../mem/virt_translate/direct_translate.rs | 38 +- .../memflow/src/mem/virt_translate/mmu/def.rs | 162 + .../memflow/src/mem/virt_translate/mmu/mod.rs | 90 + .../src/mem/virt_translate/mmu/spec.rs | 716 ++++ .../mem/virt_translate/mmu/translate_data.rs | 342 ++ .../memflow/src/mem/virt_translate/mod.rs | 951 ++++ .../memflow/src/mem/virt_translate/tests.rs | 203 +- .../memflow_lib/memflow/src/os/keyboard.rs | 41 + apex_dma/memflow_lib/memflow/src/os/mod.rs | 39 + apex_dma/memflow_lib/memflow/src/os/module.rs | 123 + .../memflow_lib/memflow/src/os/process.rs | 364 ++ apex_dma/memflow_lib/memflow/src/os/root.rs | 387 ++ apex_dma/memflow_lib/memflow/src/os/util.rs | 491 +++ .../memflow_lib/memflow/src/plugins/args.rs | 728 ++++ .../memflow/src/plugins/connector.rs | 406 ++ .../memflow_lib/memflow/src/plugins/logger.rs | 173 + .../memflow_lib/memflow/src/plugins/mod.rs | 1215 ++++++ 
.../memflow_lib/memflow/src/plugins/os.rs | 144 + .../memflow_lib/memflow/src/plugins/util.rs | 159 + .../memflow_lib/memflow/src/process/mod.rs | 81 - .../memflow_lib/memflow/src/types/address.rs | 639 ++- .../memflow/src/types/byte_swap.rs | 2 +- .../{mem => types}/cache/count_validator.rs | 21 +- .../memflow/src/{mem => types}/cache/mod.rs | 33 +- .../{mem => types}/cache/timed_validator.rs | 23 +- .../memflow/src/types/gap_remover.rs | 74 + apex_dma/memflow_lib/memflow/src/types/mem.rs | 7 + .../memflow/src/types/mem_units.rs | 72 + apex_dma/memflow_lib/memflow/src/types/mod.rs | 37 +- .../memflow_lib/memflow/src/types/page.rs | 10 +- .../memflow/src/types/physical_address.rs | 178 +- .../memflow_lib/memflow/src/types/pointer.rs | 539 +++ .../memflow/src/types/pointer32.rs | 270 -- .../memflow/src/types/pointer64.rs | 269 -- .../memflow_lib/memflow/src/types/size.rs | 49 - apex_dma/memflow_lib/nostd-test/Cargo.toml | 15 +- apex_dma/memflow_lib/nostd-test/src/main.rs | 15 +- apex_dma/memory.cpp | 132 +- apex_dma/memory.h | 63 +- 263 files changed, 31543 insertions(+), 12224 deletions(-) create mode 100644 apex_dma/memflow_lib/.cargo/config create mode 100644 apex_dma/memflow_lib/.github/workflows/build.yml create mode 100644 apex_dma/memflow_lib/.gitignore create mode 100644 apex_dma/memflow_lib/RELEASE.md create mode 100644 apex_dma/memflow_lib/memflow-bench/src/util.rs create mode 100644 apex_dma/memflow_lib/memflow-bench/vagrant/Vagrantfile delete mode 100644 apex_dma/memflow_lib/memflow-derive/tests/derive_test.rs create mode 100644 apex_dma/memflow_lib/memflow-ffi/cglue.toml delete mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/Makefile create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/.clang-format create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/Makefile create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/find_process.c create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/module_dump.c create mode 
100644 apex_dma/memflow_lib/memflow-ffi/examples/c/module_list.c create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/phys_mem.c create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/c/process_list.c create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/cpp/Makefile create mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/cpp/plist.cpp delete mode 100644 apex_dma/memflow_lib/memflow-ffi/examples/phys_mem.c create mode 100644 apex_dma/memflow_lib/memflow-ffi/memflow.hpp delete mode 100644 apex_dma/memflow_lib/memflow-ffi/memflow_cpp.h create mode 100644 apex_dma/memflow_lib/memflow-ffi/memflow_go.yml delete mode 100644 apex_dma/memflow_lib/memflow-ffi/src/connectors/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow-ffi/src/mem/phys_mem.rs delete mode 100644 apex_dma/memflow_lib/memflow-ffi/src/mem/virt_mem.rs create mode 100644 apex_dma/memflow_lib/memflow-ffi/src/os/mod.rs create mode 100644 apex_dma/memflow_lib/memflow-ffi/src/plugins/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow-ffi/src/process.rs delete mode 100644 apex_dma/memflow_lib/memflow-ffi/src/win32.rs create mode 100644 apex_dma/memflow_lib/memflow-ffi/verify_headers.sh delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/.gitignore delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/Cargo.toml delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/Makefile delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/README.md delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/examples/read_phys.rs delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/install.sh delete mode 100644 apex_dma/memflow_lib/memflow-qemu-procfs/src/lib.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/.gitignore delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/Cargo.toml delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/README.md delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/bindgen.sh delete mode 100644 
apex_dma/memflow_lib/memflow-win32-ffi/cbindgen.toml delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/examples/Makefile delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/examples/dump_header.c delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/examples/process_list.c delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32.h delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32_cpp.h delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/start_block.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/lib.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/win32/kernel.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/win32/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/win32/module.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process_info.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/.github/workflows/binary-build.yml create mode 100644 apex_dma/memflow_lib/memflow-win32/.github/workflows/build.yml create mode 100644 apex_dma/memflow_lib/memflow-win32/.gitignore rename apex_dma/memflow_lib/{memflow-qemu-procfs => memflow-win32}/LICENSE (89%) delete mode 100644 apex_dma/memflow_lib/memflow-win32/build.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/examples/dump_offsets.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/examples/integration.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/examples/multithreading.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/examples/read_bench.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/examples/read_keys.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/install.sh create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/Cargo.toml rename 
apex_dma/memflow_lib/memflow-win32/{ => memflow-win32-defs}/examples/generate_offsets.rs (53%) rename apex_dma/memflow_lib/memflow-win32/{src/kernel/mod.rs => memflow-win32-defs/src/kernel.rs} (95%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/lib.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32-defs}/src/offsets/builder.rs (51%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/mod.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32-defs}/src/offsets/offset_table.rs (64%) rename apex_dma/memflow_lib/memflow-win32/{src/offsets/pdb_struct.rs => memflow-win32-defs/src/offsets/pdb.rs} (69%) rename apex_dma/memflow_lib/memflow-win32/{src/offsets/pdb_struct => memflow-win32-defs/src/offsets/pdb}/data.rs (94%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32-defs}/src/offsets/symstore.rs (60%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/Cargo.toml create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/build.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/dump_offsets.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/open_process.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/process_list.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml (55%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_9C00B19DBDE003DBFE4AB4216993C8431.toml rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml (55%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml (54%) rename 
apex_dma/memflow_lib/memflow-win32/{offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml => memflow-win32/offsets/10_0_19045_X64_5F0CF5D532F385333A9B4ABA25CA65961.toml} (58%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_22000_X86_55678BC384F099B6ED05E9E39046924A1.toml create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/3_10_511_X86.toml rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/4_0_1381_X86.toml (67%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml (54%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml (54%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/mod.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/ntos.rs (52%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/pehelper.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x64.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/ntos/x86.rs (67%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/aarch64.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/start_block/x64.rs (71%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/start_block/x86.rs (60%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/start_block/x86pae.rs (55%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/kernel/sysproc.rs (58%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/lib.rs (82%) 
create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/offsets/mod.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/plugins.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/win32.rs (69%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel/mem_map.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/win32/kernel_builder.rs (65%) rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/win32/kernel_info.rs (65%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/keyboard.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/module.rs create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/process.rs rename apex_dma/memflow_lib/memflow-win32/{ => memflow-win32}/src/win32/unicode_string.rs (56%) create mode 100644 apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/vat.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/error.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/pehelper.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x64.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/kernel/start_block.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/offsets/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/win32/kernel.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/win32/keyboard.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/win32/module.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/win32/process.rs delete mode 100644 apex_dma/memflow_lib/memflow-win32/src/win32/vat.rs create mode 100644 
apex_dma/memflow_lib/memflow/examples/cached_view.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/integration.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/kernel_exports.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/kernel_maps.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/kernel_modules.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/keyboard.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/mem_maps.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/module_info.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/module_list.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/multithreading.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/open_process.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/process_list.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/read_bench.rs create mode 100644 apex_dma/memflow_lib/memflow/examples/target_list.rs create mode 100644 apex_dma/memflow_lib/memflow/src/architecture/arm/aarch64.rs create mode 100644 apex_dma/memflow_lib/memflow/src/architecture/arm/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/architecture/mmu_spec.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/architecture/mmu_spec/translate_data.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/connector/args.rs create mode 100644 apex_dma/memflow_lib/memflow/src/connector/cpu_state.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/connector/inventory.rs create mode 100644 apex_dma/memflow_lib/memflow/src/dummy/mem.rs create mode 100644 apex_dma/memflow_lib/memflow/src/dummy/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/dummy/offset_pt.rs create mode 100644 apex_dma/memflow_lib/memflow/src/dummy/os.rs create mode 100644 apex_dma/memflow_lib/memflow/src/dummy/process.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/iter/void.rs delete mode 100644 
apex_dma/memflow_lib/memflow/src/mem/dummy.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/mem_data.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/arch_overlay.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/batcher.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/cached_view.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/cursor.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/memory_view/remap_view.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem.rs rename apex_dma/memflow_lib/memflow/src/mem/{cache/cached_memory_access.rs => phys_mem/middleware/cache/mod.rs} (59%) rename apex_dma/memflow_lib/memflow/src/mem/{ => phys_mem/middleware}/cache/page_cache.rs (67%) create mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/delay.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/metrics.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/mem/phys_mem_batcher.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_mem.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_mem/mod.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_mem_batcher.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_translate.rs rename apex_dma/memflow_lib/memflow/src/mem/{cache/cached_vat.rs => virt_translate/cache/mod.rs} (58%) rename apex_dma/memflow_lib/memflow/src/mem/{ => virt_translate}/cache/tlb_cache.rs (77%) create mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/def.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/mod.rs create mode 100644 
apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/spec.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/translate_data.rs create mode 100644 apex_dma/memflow_lib/memflow/src/mem/virt_translate/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/keyboard.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/module.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/process.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/root.rs create mode 100644 apex_dma/memflow_lib/memflow/src/os/util.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/args.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/connector.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/logger.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/mod.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/os.rs create mode 100644 apex_dma/memflow_lib/memflow/src/plugins/util.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/process/mod.rs rename apex_dma/memflow_lib/memflow/src/{mem => types}/cache/count_validator.rs (77%) rename apex_dma/memflow_lib/memflow/src/{mem => types}/cache/mod.rs (51%) rename apex_dma/memflow_lib/memflow/src/{mem => types}/cache/timed_validator.rs (71%) create mode 100644 apex_dma/memflow_lib/memflow/src/types/gap_remover.rs create mode 100644 apex_dma/memflow_lib/memflow/src/types/mem.rs create mode 100644 apex_dma/memflow_lib/memflow/src/types/mem_units.rs create mode 100644 apex_dma/memflow_lib/memflow/src/types/pointer.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/types/pointer32.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/types/pointer64.rs delete mode 100644 apex_dma/memflow_lib/memflow/src/types/size.rs diff --git a/apex_dma/Makefile b/apex_dma/Makefile index 6ed390b..5356e6e 100644 --- a/apex_dma/Makefile +++ b/apex_dma/Makefile @@ -1,12 +1,11 @@ CXX=g++ 
-CXXFLAGS=-I./memflow_lib/memflow-win32-ffi/ -I./memflow_lib/memflow-ffi/ -L./memflow_lib/target/release -Wno-multichar -LIBS=-lm -Wl,--no-as-needed -ldl -lpthread -l:libmemflow_win32_ffi.a +CXXFLAGS=-I./memflow_lib/memflow-ffi/ -L./memflow_lib/target/release -Wno-multichar +LIBS=-lm -ldl -lpthread -l:libmemflow_ffi.a OUTDIR=./build OBJDIR=$(OUTDIR)/obj $(shell mkdir -p $(OBJDIR)) -$(shell cp memflow_lib/memflow-qemu-procfs/target/release/libmemflow_qemu_procfs.so $(OUTDIR)) %.o: %.cpp $(CXX) -c -o $(OBJDIR)/$@ $< $(CXXFLAGS) diff --git a/apex_dma/apex_dma.cpp b/apex_dma/apex_dma.cpp index 3066554..4f772ab 100644 --- a/apex_dma/apex_dma.cpp +++ b/apex_dma/apex_dma.cpp @@ -705,8 +705,7 @@ int main(int argc, char *argv[]) { if(geteuid() != 0) { - printf("Error: %s is not running as root\n", argv[0]); - return 0; + printf("Warning: %s is not running as root\n", argv[0]); } const char* cl_proc = "client_ap.exe"; diff --git a/apex_dma/build.sh b/apex_dma/build.sh index 129edca..a733f22 100644 --- a/apex_dma/build.sh +++ b/apex_dma/build.sh @@ -1,16 +1,9 @@ #!/bin/bash -cd memflow_lib/memflow-win32-ffi/ +cd memflow_lib/memflow-ffi/ if cargo build --release ; then - cd ../memflow-qemu-procfs - - if cargo build --release --all-features ; then - cd ../../ - make - else - echo "Error while building memflow-qemu-procfs" - fi - + cd ../../ + make else - echo "Error while building memflow-win32-ffi" + echo "Error while building memflow-ffi" fi diff --git a/apex_dma/memflow_lib/.cargo/config b/apex_dma/memflow_lib/.cargo/config new file mode 100644 index 0000000..7cad562 --- /dev/null +++ b/apex_dma/memflow_lib/.cargo/config @@ -0,0 +1,2 @@ +[target.'cfg(unix)'] +runner = "./runner.sh" \ No newline at end of file diff --git a/apex_dma/memflow_lib/.github/workflows/build.yml b/apex_dma/memflow_lib/.github/workflows/build.yml new file mode 100644 index 0000000..78d3bb2 --- /dev/null +++ b/apex_dma/memflow_lib/.github/workflows/build.yml @@ -0,0 +1,176 @@ +name: Build and test + 
+on: [push, pull_request] + +env: + CARGO_TERM_COLOR: always + +jobs: + + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + flags: [--all-features, --no-default-features] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + + - name: Build + run: cargo build --workspace ${{ matrix.flags }} --verbose + + - name: Build examples + run: cargo build --workspace ${{ matrix.flags }} --examples --verbose + + build-cross-targets: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: [aarch64-unknown-linux-gnu, armv7-unknown-linux-gnueabihf, i686-unknown-linux-gnu] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + target: ${{ matrix.target }} + override: true + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --target ${{ matrix.target }} --workspace --all-features --verbose + + test: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + + - name: Pre-build binaries (for inventory integration tests) + run: cargo build --workspace --all-features --verbose + + - name: Run all tests + run: cargo test --workspace --all-features --verbose + if: runner.os == 'Linux' + + - name: Run all tests + run: cargo test --workspace --exclude memflow-derive --all-features --verbose + if: runner.os != 'Linux' + + test-cross: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: [aarch64-unknown-linux-gnu, i686-unknown-linux-gnu] + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly # currently required due to -Zdoctest-xcompile + target: ${{ 
matrix.target }} + override: true + - name: Pre-build binaries (for inventory integration tests) + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --target ${{ matrix.target }} --workspace --all-features --verbose --release + - name: Run all tests + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: -Zdoctest-xcompile --target ${{ matrix.target }} --workspace --all-features --verbose --release + + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + components: rustfmt, clippy + - name: Check formatting + run: cargo fmt -- --check + - uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: --all-targets --all-features --workspace -- -D clippy::all + + build-ffi: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly # required by cglue codegen + override: true + + - name: Validate c and c++ headers + run: cd memflow-ffi; ./verify_headers.sh + + - name: Build memflow + run: cargo build --workspace --release --verbose + + - name: Build c examples + run: cd memflow-ffi/examples/c; make all + - name: Build c++ examples + run: cd memflow-ffi/examples/cpp; make all + + build-nostd: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + steps: + - uses: actions/checkout@v2 + - name: Set up Rust nightly + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly-2023-12-15 + override: true + - run: rustup toolchain install nightly-2023-12-15 + - run: rustup +nightly-2023-12-15 component add rust-src + - name: Build no_std crate + run: cd nostd-test; cargo +nightly-2023-12-15 build --all-features --verbose + + build-coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + path: 'memflow-repo' + - name: Set up Rust nightly + uses: 
actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + - run: cargo install grcov + - name: Run tests with coverage + run: | + cd memflow-repo + export CARGO_INCREMENTAL=0 + export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" + export RUSTDOCFLAGS="-Cpanic=abort" + cargo build --workspace --exclude memflow-derive --all-features + cargo test --workspace --exclude memflow-derive --all-features + grcov ./target/debug/ -s . -t lcov --llvm --branch --ignore-not-existing -o ./target/debug/coverage + bash <(curl -s https://codecov.io/bash) -f ./target/debug/coverage -t ${{ secrets.CODECOV_TOKEN }}; diff --git a/apex_dma/memflow_lib/.gitignore b/apex_dma/memflow_lib/.gitignore new file mode 100644 index 0000000..db42e82 --- /dev/null +++ b/apex_dma/memflow_lib/.gitignore @@ -0,0 +1,13 @@ +/target +**/*.rs.bk +*.swp +*.so +*.dll +*.dylib +.vscode +nostd-test/target +nostd-test/Cargo.lock +*.so +.vagrant +TODO.md +TODO.txt diff --git a/apex_dma/memflow_lib/CHANGES.md b/apex_dma/memflow_lib/CHANGES.md index e517465..408f7b8 100644 --- a/apex_dma/memflow_lib/CHANGES.md +++ b/apex_dma/memflow_lib/CHANGES.md @@ -2,6 +2,48 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). 
+## 0.2.0-beta11 +- Added dtb1 and dtb2 fields to ProcessInfo structure +- Added a function to the process trait which allows overriding dtb1/dtb2 with a custom value + +## 0.2.0-beta10 +- Removed all 'Inner' types and replaced them with GAT - this also shifts memflow to a minimum rust version of 1.70.0 +- Improved cache configuration when using plugins (usage: --connector kvm:::cache=true,cache_size=1kb,cache_time=10,cache_page_size=1000 where size and page_size is specified in hex) +- Added DelayedPhysicalMemory middleware (usage: --connector kvm:::delay=200 where delay is specified in microseconds) +- Added PhysicalMemoryMetrics middleware (usage: --connector kvm:::metrics=true) +- Updated FileIoMemory constructor with a default identity mapped memory mapping. +- Rewrote argument parser to properly handle quotes in complex arguments. + +## 0.2.0-beta9 +## 0.2.0-beta8 +- Hotfix for new bumpalo release + +## 0.2.0-beta7 +- Unified and simplified plugin proc macros and updated their documentation + +## 0.2.0-beta6 +- Added additional export/import/section helpers +- Dependency updates + +## 0.2.0-beta5 +- Cleaned up plugin search paths and matched them with memflowup +- Improved error messages +- Plugins are resolved to their canonical path before adding +- Added VirtualTranslate as optional trait on Os +- Updated to latest cglue + +## 0.2.0-beta4 +- Added missing functions to retrieve exports/imports/sections from kernel modules +- Added functions to retrieve primary kernel module + +## 0.2.0-beta3 +- Allow for PhysicalMemoryView to fill in gaps with zeros +## 0.2.0-beta2 +- Memory API and Address rework + +## 0.2.0-beta1 +- Entirely new cglue based plugin architecture and various other major improvements + ## 0.1.5 - Added memflow::prelude::v1 and memflow_win32::prelude::v1 modules - Added new fields to FFI diff --git a/apex_dma/memflow_lib/CONTRIBUTE.md b/apex_dma/memflow_lib/CONTRIBUTE.md index 87730ac..5cd893e 100644 --- a/apex_dma/memflow_lib/CONTRIBUTE.md 
+++ b/apex_dma/memflow_lib/CONTRIBUTE.md @@ -6,7 +6,7 @@ There is a feature missing? A bug you have noticed? Some inconsistencies? **Cont We welcome your contributions, and we love to keep our code standards high. So, there are a few key guidelines that you should follow for smooth sailing: -- All our code is formatted using rustfmt. Please, run `cargo fmt` before committing your changes. +- All our code is formatted using rustfmt. Please, run `cargo fmt --all` before committing your changes. - Make sure all of the tests pass with `cargo test`, as this would prevent us from merging your changes. - Make sure that clippy does not complain with `cargo clippy --all-targets --all-features --workspace -- -D warnings -D clippy::all` diff --git a/apex_dma/memflow_lib/Cargo.toml b/apex_dma/memflow_lib/Cargo.toml index 938407c..e66a690 100644 --- a/apex_dma/memflow_lib/Cargo.toml +++ b/apex_dma/memflow_lib/Cargo.toml @@ -1,24 +1,21 @@ - [profile.bench] debug = true [workspace] members = [ "memflow", - "memflow-win32", "memflow-ffi", - "memflow-win32-ffi", "memflow-bench", ] default-members = [ "memflow", - "memflow-win32", "memflow-ffi", - "memflow-win32-ffi", "memflow-bench", ] exclude = [ - "nostd-test", - "memflow-qemu-procfs" + "nostd-test" ] + +[patch.crates-io] +goblin = { git = "https://github.com/h33p/goblin", branch = "lossy-macho" } diff --git a/apex_dma/memflow_lib/LICENSE b/apex_dma/memflow_lib/LICENSE index dd842a4..cf3ebd7 100644 --- a/apex_dma/memflow_lib/LICENSE +++ b/apex_dma/memflow_lib/LICENSE @@ -1,7 +1,7 @@ MIT License -Copyright (c) 2020 ko1N -Copyright (c) 2020 Aurimas Blažulionis <0x60@pm.me> +Copyright (c) 2020-2022 ko1N +Copyright (c) 2020-2022 Aurimas Blažulionis <0x60@pm.me> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/apex_dma/memflow_lib/README.md b/apex_dma/memflow_lib/README.md index d17b228..9949d76 100644 --- 
a/apex_dma/memflow_lib/README.md +++ b/apex_dma/memflow_lib/README.md @@ -5,58 +5,81 @@ [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) [![Discord](https://img.shields.io/discord/738739624976973835?color=%20%237289da&label=Discord)](https://discord.gg/afsEtMR) -## physical memory introspection framework +## machine introspection made easy -memflow is a library that allows live memory introspection of running systems and their snapshots. Due to its modular approach it trivial to support almost any scenario where Direct Memory Access is available. +memflow is a library that enables introspection of various machines (hardware, virtual machines, memory dumps) in a generic fashion. There are 2 primary types of objects in memflow - _Connectors_ and _OS layers_. Connector provides raw access to physical memory of a machine. Meanwhile, OS layer builds a higher level abstraction over running operating system, providing access to running processes, input events, etc. These objects are incredibly flexible as they can be chained together to gain access to a process running multiple levels of virtualization deep (see figure below). -The very core of the library is a [PhysicalMemory](https://docs.rs/memflow/latest/memflow/mem/phys_mem/trait.PhysicalMemory.html) that provides direct memory access in an abstract environment. This object can be defined both statically, and dynamically with the use of the `inventory` feature. If `inventory` is enabled, it is possible to dynamically load libraries that provide Direct Memory Access. - -Through the use of OS abstraction layers, like [memflow-win32](https://github.com/memflow/memflow/tree/master/memflow-win32), users can gain access to virtual memory of individual processes by creating objects that implement [VirtualMemory](https://docs.rs/memflow/latest/memflow/mem/virt_mem/trait.VirtualMemory.html). 
- -Bridging the two is done by a highly throughput optimized virtual address translation function, which allows for crazy fast memory transfers at scale. - -The core is architecture-independent (as long as addresses fit in 64-bits), and currently, both 32, and 64-bit versions of the x86 family are available to be used. - -For non-rust libraries, it is possible to use the [FFI](https://github.com/memflow/memflow/tree/master/memflow-ffi) to interface with the library. +``` ++-----------+ +-----------+ +| native OS | | leechcore | ++-+---------+ +-+---------+ + | | + | +-----------+ | +----------+ + +->| QEMU VM | +->| Win32 OS | + +-+---------+ +-+--------+ + | | + | +----------+ | +-----------+ + +->| Win32 OS | +->| lsass.exe | + +-+--------+ +-----------+ + | + | +-----------+ + +->| Hyper-V | + +-+---------+ + | + | +----------+ + +->| Linux OS | + +-+--------+ + | + | +-----------+ + +->| SSHD Proc | + +-----------+ + +(Example chains of access. For illustrative purposes only - Hyper-V Connector and Linux OS are not yet available) +``` -In the repository, you can find various examples available (which use the memflow-win32 layer) +As a library user, you do not have to worry about delicacies of chaining - everything is provided, batteries included. See one of our [examples](memflow/examples/process_list.rs) on how simple it is to build a chain (excluding parsing). All Connectors and OS layers are dynamically loadable with common interface binding them. -## Building from source +All of this flexibility is provided with very robust and efficient backend - memory interface is batchable and divisible, which gets taken advantage of by our throughput optimized virtual address translation pipeline that is able to walk the entire process virtual address space in under a second. Connectors and OS layers can be composed with the vast library of generic caching mechanisms, utility functions and data structures. 
-To build all projects in the memflow workspace: +The memflow ecosystem is not bound to just Rust - Connector and OS layer functions are linked together using C ABI, thus users can write code that interfaces with them in other languages, such as C, C++, Zig, etc. In addition, these plugins can too be implemented in foreign languages - everything is open. -`cargo build --release --workspace` +Overall, memflow is the most robust, efficient and flexible solution out there for machine introspection. -To build all examples: +## Getting started -`cargo build --release --workspace --examples` +Make sure that your rustc version is at least `1.70.0` or newer. -Run all tests: +memflow uses a plugin based approach and is capable of loading different physical memory backends (so-called [`connectors`](#connectors)) at runtime. On top of the physical memory backends memflow is also capable of loading plugins for interfacing with a specific target OS at runtime. -`cargo test --workspace` +To get started, you want to at least install one connector. For that, use the [memflowup](https://github.com/memflow/memflowup) utility (use dev channel). -Execute the benchmarks: +### Manual installation -`cargo bench` +The recommended installation locations for connectors on Linux are: +``` +/usr/lib/memflow/libmemflow_xxx.so +$HOME/.local/lib/memflow/libmemflow_xxx.so +``` -## Documentation +The recommended installation locations for connectors on Windows are: +``` +[Username]/Documents/memflow/libmemflow_xxx.dll +``` -Extensive code documentation can be found at [docs.rs](https://docs.rs/memflow/0.1/). +Additionally, connectors can be placed in any directory of the environment PATH or the working directory of the program as well. -An additional getting started guide as well as a higher level -explanation of the inner workings of memflow can be found at [memflow.github.io](https://memflow.github.io). 
+For more information about how to get started with memflow please head over to the YouTube series produced by [h33p](https://github.com/h33p/): -If you decide to build the latest documentation you can do it by issuing: +- [memflow basics](https://www.youtube.com/playlist?list=PLrC4R7zDrxB3RSJQk9ahmXNCw8m3pdP6z) +- [memflow applied](https://www.youtube.com/watch?v=xJXkRMy71dc&list=PLrC4R7zDrxB17iWCy9eEdCaluCR3Bkn8q) -`cargo doc --workspace --no-deps --open` - -## Basic usage +## Running Examples You can either run one of the examples with `cargo run --release --example`. Pass nothing to get a list of examples. -Some connectors like `qemu_procfs` will require elevated privileges. See the Connectors section of this Readme for more information. +Some connectors like `qemu` will require elevated privileges. Refer to the readme of the connector for additional information on their required access rights. -To simplify running examples, tests, and benchmarks through different connectors we added a simple cargo runner script for Linux to this repository. +To simplify running examples, tests, and benchmarks through different connectors, we added a simple cargo runner script for Linux to this repository. Simply set any of the following environment variables when running the `cargo` command to elevate privileges: - `RUST_SUDO` will start the resulting binary via sudo. @@ -64,78 +87,75 @@ Simply set any of the following environment variables when running the `cargo` c Alternatively, you can run the benchmarks via `cargo bench` (can pass regex filters). Win32 benchmarks currently work only on Linux. -## Running Examples +All examples support the memflow connector `plugins` inventory system. +You will have to install at least one `connector` to use the examples. Refer to the [getting started](#getting-started) section for more details. -All examples support the memflow connector inventory system. -You will have to install at least one `connector` to use the examples. 
+Run memflow/read\_keys example with a qemu connector: -To install a connector just use the [memflowup](https://github.com/memflow/memflowup) utility, -or, head over to the corresponding repository and install them via the `install.sh` script. +`RUST_SETPTRACE=1 cargo run --example read_keys -- -vv -c qemu -a [vmname] -o win32` -You will find a folder called `memflow` in any of the following locations: -``` -/opt -/lib -/usr/lib/ -/usr/local/lib -/lib32 -/lib64 -/usr/lib32 -/usr/lib64 -/usr/local/lib32 -/usr/local/lib64 -``` +Run memflow/read\_bench example with a coredump connector: -On Windows, you can put the connector DLL in a folder named `memflow` -that is either in your current PATH or put it in `C:\Users\{Username}\.local\lib\memflow`. -Additionally connectors can be placed in the working directory of the process as well. +`cargo run --example read_bench --release -- -vv -c coredump -a coredump_win10_64bit.raw -o win32` -Now you can just run the examples by providing the appropriate connector name: +Note: In the examples above the `qemu` connector requires `'CAP_SYS_PTRACE=ep'` permissions. The runner script in this repository will set the appropriate flags when the `RUST_SETPTRACE` environment variable is passed to it. -Run memflow\_win32/read\_keys example with a procfs connector: +## Documentation -`RUST_SETPTRACE=1 cargo run --example read_keys -- -vv -c qemu_procfs -a [vmname]` +Extensive code documentation can be found at [docs.rs](https://docs.rs/memflow/0.2.0-beta/) +(it currently is relatively out of date). -Run memflow\_win32/read\_bench example with a coredump connector: +An additional getting started guide as well as a higher level +explanation of the inner workings of memflow can be found at [memflow.github.io](https://memflow.github.io). 
-`cargo run --example read_bench --release -- -vv -c coredump -a coredump_win10_64bit.raw` +If you decide to build the latest documentation you can do it by issuing: -Note: In the examples above the `qemu_procfs` connector requires `'CAP_SYS_PTRACE=ep'` permissions. The runner script in this repository will set the appropriate flags when the `RUST_SETPTRACE` environment variable is passed to it. +`cargo doc --workspace --no-deps --open` ## Compilation support +memflow currently requires at least rustc version `1.70.0` or newer. + | target | build | tests | benches | compiles on stable | |---------------|--------------------|--------------------|--------------------|--------------------| | linux x86_64 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | mac x86_64 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | win x86_64 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | linux aarch64 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| linux i686 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| linux armv7 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | no-std | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :x: | ## Target support -memflow-win32 is tested on the latest Windows 10 versions all the way down to Windows NT 4.0. If you found a version that does not work please submit an issue with the major/minor version as well as the build number. +By default, memflow supports analyzing 64-bit machines on any machine - be it 32 or 64 bit. Using memflow without `default_features` can disable 64-bit support on 32-bit machines for an efficiency gain, while enabling `128_bit_mem` feature can be done for theoretical future 128-bit machine analysis. 
Note that all connectors and OS layers must be compiled with the same memory features enabled, and memflowup currently only compiles the default set of features. + +memflow-win32 is tested on the latest Windows 11 and Windows 10 versions all the way down to Windows NT 4.0. If you found a version that does not work please submit an issue with the major/minor version as well as the build number. ## Connectors -All examples provided in this repository are using the inventory to +All examples provided in this repository are using the `plugins` inventory to dynamically load a connector at runtime. When using the library programmatically it is possible to just statically link a connector into the code. Some connectors also require different permissions. Please refer to the individual connector repositories for more information. These are the currently officially existing connectors: -- [qemu_procfs](https://github.com/memflow/memflow-qemu-procfs) +- [qemu](https://github.com/memflow/memflow-qemu-procfs) - [kvm](https://github.com/memflow/memflow-kvm) - [pcileech](https://github.com/memflow/memflow-pcileech) - [coredump](https://github.com/memflow/memflow-coredump) -In case you write your own connector please hit us up with a merge request so we can maintain a list of third-party connectors as well. +In case you write your own connector please hit us up with a pull request so we can maintain a list of third-party connectors as well. 
+ +## Build on memflow + +Officialy supported projects: +- [memflow-py](https://github.com/memflow/memflow-py) Python Wrapper for memflow (thanks to [emesare](https://github.com/emesare)) -## Road map / Future Development -- Provide a rust native connector for PCILeech based hardware -- Provide a UEFI Demo -- Linux target support +Additional projects from the community: +- [.NET wrapper for memflow-ffi](https://github.com/uberhalit/memflow.NET) by [uberhalit](https://github.com/uberhalit) +- [rhai integration](https://github.com/dankope/rhai-memflow) by [emesare](https://github.com/emesare) ## Acknowledgements - [CasualX](https://github.com/casualx/) for his wonderful pelite crate diff --git a/apex_dma/memflow_lib/RELEASE.md b/apex_dma/memflow_lib/RELEASE.md new file mode 100644 index 0000000..280acc0 --- /dev/null +++ b/apex_dma/memflow_lib/RELEASE.md @@ -0,0 +1,21 @@ +# Checklist: +- memflow +- memflow-ffi + +# Plugins: +- memflow-win32 +- memflow-pcileech +- memflow-reclass-plugin +- memflow-native +- memflow-qemu +- memflow-kcore +- memflow-cmake-example +- memflow-kvm +- memflow-coredump +- memflow-linux +- memflow-microvmi + +# Tools: +- cloudflow +- scanflow +- reflow diff --git a/apex_dma/memflow_lib/memflow-bench/Cargo.toml b/apex_dma/memflow_lib/memflow-bench/Cargo.toml index a3c2c9a..908ac38 100644 --- a/apex_dma/memflow_lib/memflow-bench/Cargo.toml +++ b/apex_dma/memflow_lib/memflow-bench/Cargo.toml @@ -1,30 +1,29 @@ [package] name = "memflow-bench" -version = "0.1.5" -authors = ["Aurimas Blažulionis <0x60@pm.me>"] +version = "0.2.0" +authors = ["Aurimas Blažulionis <0x60@pm.me>", "ko1N "] edition = "2018" description = "benchmarks for the memflow physical memory introspection framework" readme = "README.md" homepage = "https://memflow.github.io" repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" +license = "MIT" keywords = [ "memflow", "introspection", "memory", "dma" ] categories = [ "memory-management", "os" ] publish = 
false [dependencies] -memflow = { path = "../memflow", features = ["dummy_mem"] } -rand = "0.7" -rand_xorshift = "0.2" +memflow = { version = "0.2", path = "../memflow", features = ["dummy_mem"] } +log = "^0.4.14" +rand = "^0.8.4" +rand_xorshift = "^0.3" # This branch provides throughput plots -criterion = { git = "https://github.com/h33p/criterion.rs.git", branch = "tput" } - -memflow-win32 = { path = "../memflow-win32" } +criterion = { git = "https://github.com/h33p/criterion.rs.git", branch = "tput2" } [dev-dependencies] -memflow = { path = "../memflow", features = ["dummy_mem"] } -memflow-win32 = { path = "../memflow-win32" } +memflow = { version = "0.2", path = "../memflow", features = ["dummy_mem", "plugins"] } +simplelog = "^0.12.0" [features] default = [] @@ -33,9 +32,9 @@ default = [] name = "read_dummy" harness = false -#[[bench]] -#name = "read_win32" -#harness = false +[[bench]] +name = "read_win32" +harness = false [[bench]] name = "batcher" diff --git a/apex_dma/memflow_lib/memflow-bench/benches/batcher.rs b/apex_dma/memflow_lib/memflow-bench/benches/batcher.rs index 79046aa..dc16cd1 100644 --- a/apex_dma/memflow_lib/memflow-bench/benches/batcher.rs +++ b/apex_dma/memflow_lib/memflow-bench/benches/batcher.rs @@ -2,31 +2,35 @@ use criterion::*; use memflow::prelude::v1::*; -//use memflow::mem::dummy::DummyMemory as Memory; +use std::convert::TryInto; + +//use memflow::dummy::DummyMemory as Memory; struct NullMem {} impl NullMem { - pub fn new(_: usize) -> Self { + pub fn new(_: umem) -> Self { Self {} } } impl PhysicalMemory for NullMem { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - black_box(data.iter_mut().count()); + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { + black_box(data); Ok(()) } - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - black_box(data.iter().count()); + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()> { + 
black_box(data); Ok(()) } fn metadata(&self) -> PhysicalMemoryMetadata { PhysicalMemoryMetadata { - size: 0, + max_address: Address::NULL, + real_size: 0, readonly: true, + ideal_batch_size: u32::MAX, } } } @@ -39,41 +43,42 @@ use rand_xorshift::XorShiftRng as CurRng; static mut TSLICE: [[u8; 16]; 0x10000] = [[0; 16]; 0x10000]; -fn read_test_nobatcher( +fn read_test_nobatcher( chunk_size: usize, mem: &mut T, mut rng: CurRng, - size: usize, - tbuf: &mut [PhysicalReadData], + size: umem, + tbuf: &mut [ReadDataRaw], ) { - let base_addr = Address::from(rng.gen_range(0, size)); + let base_addr = Address::from(rng.gen_range(0..size)); - for PhysicalReadData(addr, _) in tbuf.iter_mut().take(chunk_size) { - *addr = (base_addr + rng.gen_range(0, 0x2000)).into(); + for CTup3(addr, _, _) in tbuf.iter_mut().take(chunk_size) { + *addr = base_addr + rng.gen_range(0usize..0x2000); } - let _ = black_box(mem.phys_read_raw_list(&mut tbuf[..chunk_size])); + let iter = tbuf[..chunk_size] + .iter_mut() + .map(|CTup3(a, b, c): &mut ReadDataRaw| CTup3(*a, *b, c.into())); + + let _ = black_box(MemOps::with_raw(iter, None, None, |data| { + mem.read_raw_iter(data) + })); } -fn read_test_batcher( - chunk_size: usize, - mem: &mut T, - mut rng: CurRng, - size: usize, -) { - let base_addr = Address::from(rng.gen_range(0, size)); +fn read_test_batcher(chunk_size: usize, mem: &mut T, mut rng: CurRng, size: umem) { + let base_addr = Address::from(rng.gen_range(0..size)); - let mut batcher = mem.phys_batcher(); - batcher.read_prealloc(chunk_size); + let mut batcher = mem.batcher(); + batcher.reserve(chunk_size); for i in unsafe { TSLICE.iter_mut().take(chunk_size) } { - batcher.read_into((base_addr + rng.gen_range(0, 0x2000)).into(), i); + batcher.read_into(base_addr + rng.gen_range(0usize..0x2000), i); } let _ = black_box(batcher.commit_rw()); } -fn read_test_with_ctx( +fn read_test_with_ctx( bench: &mut Bencher, chunk_size: usize, use_batcher: bool, @@ -81,7 +86,7 @@ fn read_test_with_ctx( ) { 
let rng = CurRng::from_rng(thread_rng()).unwrap(); - let mem_size = size::mb(64); + let mem_size = mem::mb(64); let mut tbuf = vec![]; @@ -89,7 +94,7 @@ fn read_test_with_ctx( unsafe { TSLICE } .iter_mut() .map(|arr| { - PhysicalReadData(PhysicalAddress::INVALID, unsafe { + CTup3(Address::INVALID, Address::INVALID, unsafe { std::mem::transmute(&mut arr[..]) }) }) @@ -119,9 +124,9 @@ fn chunk_read_params( |b, &chunk_size| { read_test_with_ctx( b, - black_box(chunk_size as usize), + black_box(chunk_size.try_into().unwrap()), use_batcher, - &mut initialize_ctx(), + &mut initialize_ctx().into_phys_view(), ) }, ); @@ -135,20 +140,20 @@ fn chunk_read( ) { let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_batched_read", backend_name); + let group_name = format!("{backend_name}_batched_read"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); chunk_read_params( &mut group, - format!("{}_without", group_name), + format!("{group_name}_without"), false, initialize_ctx, ); chunk_read_params( &mut group, - format!("{}_with", group_name), + format!("{group_name}_with"), true, initialize_ctx, ); @@ -156,13 +161,13 @@ fn chunk_read( criterion_group! 
{ name = dummy_read; config = Criterion::default() - .warm_up_time(std::time::Duration::from_millis(300)) - .measurement_time(std::time::Duration::from_millis(2700)); + .warm_up_time(std::time::Duration::from_millis(500)) + .measurement_time(std::time::Duration::from_millis(5000)); targets = dummy_read_group } fn dummy_read_group(c: &mut Criterion) { - chunk_read(c, "dummy", &|| Memory::new(size::mb(64))); + chunk_read(c, "dummy", &|| Memory::new(mem::mb(64))); } criterion_main!(dummy_read); diff --git a/apex_dma/memflow_lib/memflow-bench/benches/read_dummy.rs b/apex_dma/memflow_lib/memflow-bench/benches/read_dummy.rs index 632781a..8b35e71 100644 --- a/apex_dma/memflow_lib/memflow-bench/benches/read_dummy.rs +++ b/apex_dma/memflow_lib/memflow-bench/benches/read_dummy.rs @@ -3,39 +3,26 @@ use memflow_bench::*; use criterion::*; -use memflow::mem::dummy::{DummyMemory as Memory, DummyModule, DummyProcess}; +use memflow::dummy::DummyMemory as Memory; use memflow::prelude::v1::*; -fn initialize_virt_ctx() -> Result<( - Memory, - DirectTranslate, - DummyProcess, - impl ScopedVirtualTranslate, - DummyModule, -)> { - let mut mem = Memory::new(size::mb(64)); - - let vat = DirectTranslate::new(); - - let proc = mem.alloc_process(size::mb(60), &[]); - let module = proc.get_module(size::mb(4)); - let translator = proc.translator(); - Ok((mem, vat, proc, translator, module)) +fn initialize_virt_ctx(cache_size: usize, use_tlb: bool) -> Result> { + util::build_os("", cache_size, "dummy", use_tlb) } fn dummy_read_group(c: &mut Criterion) { - virt::seq_read(c, "dummy", &initialize_virt_ctx); - virt::chunk_read(c, "dummy", &initialize_virt_ctx); + virt::seq_read(c, "dummy", &initialize_virt_ctx, false); + virt::chunk_read(c, "dummy", &initialize_virt_ctx, false); phys::seq_read(c, "dummy", &|| Ok(Memory::new(size::mb(64)))); phys::chunk_read(c, "dummy", &|| Ok(Memory::new(size::mb(64)))); - vat::chunk_vat(c, "dummy", &initialize_virt_ctx); + vat::chunk_vat(c, "dummy", 
&initialize_virt_ctx, false); } criterion_group! { name = dummy_read; config = Criterion::default() - .warm_up_time(std::time::Duration::from_millis(300)) - .measurement_time(std::time::Duration::from_millis(2700)); + .warm_up_time(std::time::Duration::from_millis(1000)) + .measurement_time(std::time::Duration::from_millis(10000)); targets = dummy_read_group } diff --git a/apex_dma/memflow_lib/memflow-bench/benches/read_win32.rs b/apex_dma/memflow_lib/memflow-bench/benches/read_win32.rs index 666946d..7f35f54 100644 --- a/apex_dma/memflow_lib/memflow-bench/benches/read_win32.rs +++ b/apex_dma/memflow_lib/memflow-bench/benches/read_win32.rs @@ -1,85 +1,39 @@ extern crate memflow_bench; -use memflow_bench::{phys, vat, virt}; +use memflow_bench::{phys, util, vat, virt}; use criterion::*; -use memflow::error::{Error, Result}; use memflow::prelude::v1::*; -use memflow_win32::prelude::v1::*; -use rand::prelude::*; -use rand::{Rng, SeedableRng}; -use rand_xorshift::XorShiftRng as CurRng; +fn create_connector(args: Option<&ConnectorArgs>) -> Result { + // this workaround is to prevent loaded libraries + // from spitting out to much log information and skewing benchmarks + let filter = log::max_level(); + log::set_max_level(log::Level::Error.to_level_filter()); -fn create_connector(args: &ConnectorArgs) -> Result { - unsafe { memflow::connector::ConnectorInventory::scan().create_connector("qemu_procfs", args) } -} - -fn initialize_virt_ctx() -> Result<( - impl PhysicalMemory, - DirectTranslate, - Win32ProcessInfo, - impl ScopedVirtualTranslate, - Win32ModuleInfo, -)> { - let mut phys_mem = create_connector(&ConnectorArgs::new())?; - - let kernel_info = KernelInfo::scanner(&mut phys_mem) - .scan() - .map_err(|_| Error::Other("unable to find kernel"))?; - let mut vat = DirectTranslate::new(); - let offsets = Win32Offsets::builder() - .kernel_info(&kernel_info) - .build() - .map_err(|_| Error::Other("unable to initialize win32 offsets with guid"))?; - - let mut kernel = 
Kernel::new(&mut phys_mem, &mut vat, offsets, kernel_info); + let result = Inventory::scan().create_connector("qemu", None, args)?; - let mut rng = CurRng::from_rng(thread_rng()).unwrap(); - - let proc_list = kernel - .process_info_list() - .map_err(|_| Error::Other("unable to read process list"))?; - for i in -100..(proc_list.len() as isize) { - let idx = if i >= 0 { - i as usize - } else { - rng.gen_range(0, proc_list.len()) - }; - - let mod_list: Vec = { - let mut prc = Win32Process::with_kernel_ref(&mut kernel, proc_list[idx].clone()); - prc.module_list() - .unwrap_or_default() - .into_iter() - .filter(|module| module.size > 0x1000) - .collect() - }; - - if !mod_list.is_empty() { - let tmod = &mod_list[rng.gen_range(0, mod_list.len())]; - let proc = proc_list[idx].clone(); - let translator = proc.translator(); - return Ok((phys_mem, vat, proc, translator, tmod.clone())); // TODO: remove clone of mem + vat - } - } + log::set_max_level(filter); + Ok(result) +} - Err("No module found!".into()) +fn initialize_virt_ctx(cache_size: usize, use_tlb: bool) -> Result> { + util::build_os("qemu", cache_size, "win32", use_tlb) } fn win32_read_group(c: &mut Criterion) { - virt::seq_read(c, "win32", &initialize_virt_ctx); - virt::chunk_read(c, "win32", &initialize_virt_ctx); - phys::seq_read(c, "win32", &|| create_connector(&ConnectorArgs::new())); - phys::chunk_read(c, "win32", &|| create_connector(&ConnectorArgs::new())); - vat::chunk_vat(c, "win32", &initialize_virt_ctx); + virt::seq_read(c, "win32", &initialize_virt_ctx, true); + virt::chunk_read(c, "win32", &initialize_virt_ctx, true); + phys::seq_read(c, "win32", &|| create_connector(None)); + phys::chunk_read(c, "win32", &|| create_connector(None)); + vat::chunk_vat(c, "win32", &initialize_virt_ctx, true); } criterion_group! 
{ name = win32_read; config = Criterion::default() - .warm_up_time(std::time::Duration::from_millis(300)) - .measurement_time(std::time::Duration::from_millis(2700)); + .warm_up_time(std::time::Duration::from_millis(1000)) + .measurement_time(std::time::Duration::from_millis(10000)); targets = win32_read_group } diff --git a/apex_dma/memflow_lib/memflow-bench/src/lib.rs b/apex_dma/memflow_lib/memflow-bench/src/lib.rs index 8c06806..5fd8cac 100644 --- a/apex_dma/memflow_lib/memflow-bench/src/lib.rs +++ b/apex_dma/memflow_lib/memflow-bench/src/lib.rs @@ -1,3 +1,4 @@ pub mod phys; +pub mod util; pub mod vat; pub mod virt; diff --git a/apex_dma/memflow_lib/memflow-bench/src/phys.rs b/apex_dma/memflow_lib/memflow-bench/src/phys.rs index 5f9ac7d..a50e49e 100644 --- a/apex_dma/memflow_lib/memflow-bench/src/phys.rs +++ b/apex_dma/memflow_lib/memflow-bench/src/phys.rs @@ -1,8 +1,9 @@ use criterion::*; -use memflow::mem::{CachedMemoryAccess, PhysicalMemory}; +use memflow::mem::{CachedPhysicalMemory, MemOps, PhysicalMemory}; use memflow::architecture; +use memflow::cglue::*; use memflow::error::Result; use memflow::mem::PhysicalReadData; use memflow::types::*; @@ -11,9 +12,11 @@ use rand::prelude::*; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng as CurRng; -fn rwtest( +use std::convert::TryInto; + +fn rwtest( bench: &mut Bencher, - mem: &mut T, + mut mem: impl PhysicalMemory, (start, end): (Address, Address), chunk_sizes: &[usize], chunk_counts: &[usize], @@ -25,29 +28,35 @@ fn rwtest( for i in chunk_sizes { for o in chunk_counts { - let mut vbufs = vec![vec![0 as u8; *i]; *o]; + let mut vbufs = vec![vec![0_u8; *i]; *o]; let mut done_size = 0; while done_size < read_size { - let base_addr = rng.gen_range(start.as_u64(), end.as_u64()); + let base_addr = rng.gen_range(start.to_umem()..end.to_umem()); let mut bufs = Vec::with_capacity(*o); bufs.extend(vbufs.iter_mut().map(|vec| { - let addr = (base_addr + rng.gen_range(0, 0x2000)).into(); + let addr = (base_addr + 
rng.gen_range(0..0x2000)).into(); - PhysicalReadData( + CTup3( PhysicalAddress::with_page( addr, PageType::default().write(true), - size::kb(4), + mem::kb(4), ), - vec.as_mut_slice(), + Address::NULL, + vec.as_mut_slice().into(), ) })); bench.iter(|| { - let _ = black_box(mem.phys_read_raw_list(&mut bufs)); + let iter = bufs + .iter_mut() + .map(|CTup3(a, b, d): &mut PhysicalReadData| CTup3(*a, *b, d.into())); + let _ = black_box(MemOps::with_raw(iter, None, None, |data| { + mem.phys_read_raw_iter(data) + })); }); done_size += *i * *o; @@ -60,9 +69,9 @@ fn rwtest( total_size } -fn read_test_with_mem( +fn read_test_with_mem( bench: &mut Bencher, - mem: &mut T, + mem: impl PhysicalMemory, chunk_size: usize, chunks: usize, start_end: (Address, Address), @@ -77,29 +86,35 @@ fn read_test_with_mem( )); } -fn read_test_with_ctx( +fn read_test_with_ctx( bench: &mut Bencher, cache_size: u64, chunk_size: usize, chunks: usize, - mut mem: T, + mem: impl PhysicalMemory, ) { let mut rng = CurRng::from_rng(thread_rng()).unwrap(); - let start = Address::from(rng.gen_range(0, size::mb(50))); + let start = Address::from(rng.gen_range(0..size::mb(50))); let end = start + size::mb(1); if cache_size > 0 { - let mut mem = CachedMemoryAccess::builder(&mut mem) + let mut cached_mem = CachedPhysicalMemory::builder(mem) .arch(architecture::x86::x64::ARCH) .cache_size(size::mb(cache_size as usize)) .page_type_mask(PageType::PAGE_TABLE | PageType::READ_ONLY | PageType::WRITEABLE) .build() .unwrap(); - read_test_with_mem(bench, &mut mem, chunk_size, chunks, (start, end)); + read_test_with_mem( + bench, + cached_mem.forward_mut(), + chunk_size, + chunks, + (start, end), + ); } else { - read_test_with_mem(bench, &mut mem, chunk_size, chunks, (start, end)); + read_test_with_mem(bench, mem, chunk_size, chunks, (start, end)); } } @@ -118,9 +133,9 @@ fn seq_read_params( read_test_with_ctx( b, black_box(cache_size), - black_box(size as usize), + black_box(size.try_into().unwrap()), black_box(1), - 
initialize_ctx().unwrap(), + initialize_ctx().unwrap().forward_mut(), ) }, ); @@ -137,15 +152,15 @@ fn chunk_read_params( for &chunk_size in [1, 4, 16, 64].iter() { group.throughput(Throughput::Bytes(size * chunk_size)); group.bench_with_input( - BenchmarkId::new(format!("{}_s{:x}", func_name, size), size * chunk_size), + BenchmarkId::new(format!("{func_name}_s{size:x}"), size * chunk_size), &size, |b, &size| { read_test_with_ctx( b, black_box(cache_size), - black_box(size as usize), - black_box(chunk_size as usize), - initialize_ctx().unwrap(), + black_box(size.try_into().unwrap()), + black_box(chunk_size.try_into().unwrap()), + initialize_ctx().unwrap().forward_mut(), ) }, ); @@ -160,23 +175,18 @@ pub fn seq_read( ) { let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_phys_seq_read", backend_name); + let group_name = format!("{backend_name}_phys_seq_read"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); seq_read_params( &mut group, - format!("{}_nocache", group_name), + format!("{group_name}_nocache"), 0, initialize_ctx, ); - seq_read_params( - &mut group, - format!("{}_cache", group_name), - 2, - initialize_ctx, - ); + seq_read_params(&mut group, format!("{group_name}_cache"), 2, initialize_ctx); } pub fn chunk_read( @@ -186,21 +196,16 @@ pub fn chunk_read( ) { let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_phys_chunk_read", backend_name); + let group_name = format!("{backend_name}_phys_chunk_read"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); chunk_read_params( &mut group, - format!("{}_nocache", group_name), + format!("{group_name}_nocache"), 0, initialize_ctx, ); - chunk_read_params( - &mut group, - format!("{}_cache", group_name), - 2, - initialize_ctx, - ); + chunk_read_params(&mut group, format!("{group_name}_cache"), 2, initialize_ctx); } 
diff --git a/apex_dma/memflow_lib/memflow-bench/src/util.rs b/apex_dma/memflow_lib/memflow-bench/src/util.rs new file mode 100644 index 0000000..ab39d3b --- /dev/null +++ b/apex_dma/memflow_lib/memflow-bench/src/util.rs @@ -0,0 +1,79 @@ +use memflow::prelude::v1::*; + +pub fn build_os( + conn_name: &str, + cache_size: usize, + os_name: &str, + use_tlb: bool, +) -> Result> { + // this workaround is to prevent loaded libraries + // from spitting out to much log information and skewing benchmarks + let filter = log::max_level(); + log::set_max_level(log::Level::Debug.to_level_filter()); + + let inventory = Inventory::scan(); + + log::set_max_level(log::Level::Error.to_level_filter()); + + let mut args = Args::new(); + + if !use_tlb { + args = args.insert("vatcache", "none"); + } + + let page_cache_params = if cache_size > 0 { + Some(ConnectorMiddlewareArgs::new().cache_size(cache_size)) + } else { + None + }; + + let conn_args = ConnectorArgs::new(None, Default::default(), page_cache_params); + let args = OsArgs::new(None, args); + + let ret = if conn_name.is_empty() { + inventory.builder().os(os_name).args(args).build() + } else { + inventory + .builder() + .connector(conn_name) + .args(conn_args) + .os(os_name) + .args(args) + .build() + }?; + + log::set_max_level(filter); + + Ok(ret) +} + +pub fn find_proc(os: &mut T) -> Result<(::ProcessType<'_>, ModuleInfo)> { + let infos = os.process_info_list()?; + + let mut data = None; + + for info in infos { + if let Ok(mut proc) = os.process_by_info(info.clone()) { + let mut module = None; + + proc.module_list_callback( + None, + (&mut |info: ModuleInfo| { + if info.size > 0x1000 { + module = Some(info); + } + module.is_none() + }) + .into(), + )?; + + if let Some(module) = module { + data = Some((info, module)); + break; + } + } + } + + data.and_then(move |(info, module)| Some((os.process_by_info(info).ok()?, module))) + .ok_or_else(|| ErrorKind::NotFound.into()) +} diff --git 
a/apex_dma/memflow_lib/memflow-bench/src/vat.rs b/apex_dma/memflow_lib/memflow-bench/src/vat.rs index 72090fb..10f31ea 100644 --- a/apex_dma/memflow_lib/memflow-bench/src/vat.rs +++ b/apex_dma/memflow_lib/memflow-bench/src/vat.rs @@ -1,218 +1,129 @@ use criterion::*; -use memflow::mem::{CachedMemoryAccess, CachedVirtualTranslate, PhysicalMemory, VirtualTranslate}; - -use memflow::architecture::ScopedVirtualTranslate; - -use memflow::error::Result; -use memflow::iter::FnExtend; -use memflow::process::*; -use memflow::types::*; +use memflow::cglue::as_mut; +use memflow::mem::virt_translate::*; +use memflow::prelude::v1::*; use rand::prelude::*; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng as CurRng; -fn vat_test_with_mem< - T: PhysicalMemory, - V: VirtualTranslate, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +use std::convert::TryInto; + +fn vat_test_with_mem( bench: &mut Bencher, - phys_mem: &mut T, - vat: &mut V, + vat: &mut impl VirtualTranslate, chunk_count: usize, translations: usize, - translator: S, - module: M, -) -> usize { + module: ModuleInfo, +) { let mut rng = CurRng::from_rng(thread_rng()).unwrap(); - let mut bufs = vec![Address::null(); chunk_count]; - let mut done_size = 0; + let mut bufs = vec![CTup2(Address::null(), 1); translations]; - let mut out = Vec::new(); + let base_addr = rng.gen_range(module.base.to_umem()..(module.base.to_umem() + module.size)); - while done_size < translations { - let base_addr = rng.gen_range( - module.base().as_u64(), - module.base().as_u64() + module.size() as u64, - ); + for CTup2(address, _) in bufs.iter_mut() { + *address = (base_addr + rng.gen_range(0..0x2000)).into(); + } - for addr in bufs.iter_mut() { - *addr = (base_addr + rng.gen_range(0, 0x2000)).into(); - } + let mut out = vec![]; - bench.iter(|| { + bench.iter(|| { + for chunk in bufs.chunks_mut(chunk_count) { out.clear(); - vat.virt_to_phys_iter( - phys_mem, - &translator, - bufs.iter_mut().map(|x| (*x, 1)), - &mut out, - 
&mut FnExtend::new(|_| {}), - ); + vat.virt_to_phys_list(chunk, (&mut out).into(), (&mut |_| true).into()); black_box(&out); - }); - - done_size += chunk_count; - } - - done_size + } + }); } -fn vat_test_with_ctx< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +fn vat_test_with_os( bench: &mut Bencher, - cache_size: u64, chunks: usize, translations: usize, - use_tlb: bool, - (mut mem, mut vat, prc, translator, tmod): (T, V, P, S, M), + os: &mut OsInstanceArcBox<'static>, ) { - if cache_size > 0 { - let cache = CachedMemoryAccess::builder(&mut mem) - .arch(prc.sys_arch()) - .cache_size(size::mb(cache_size as usize)) - .page_type_mask(PageType::PAGE_TABLE | PageType::READ_ONLY | PageType::WRITEABLE); - - if use_tlb { - let mut mem = cache.build().unwrap(); - let mut vat = CachedVirtualTranslate::builder(vat) - .arch(prc.sys_arch()) - .build() - .unwrap(); - vat_test_with_mem( - bench, - &mut mem, - &mut vat, - chunks, - translations, - translator, - tmod, - ); - } else { - let mut mem = cache.build().unwrap(); - vat_test_with_mem( - bench, - &mut mem, - &mut vat, - chunks, - translations, - translator, - tmod, - ); - } - } else if use_tlb { - let mut vat = CachedVirtualTranslate::builder(vat) - .arch(prc.sys_arch()) - .build() - .unwrap(); - vat_test_with_mem( - bench, - &mut mem, - &mut vat, - chunks, - translations, - translator, - tmod, - ); - } else { - vat_test_with_mem( - bench, - &mut mem, - &mut vat, - chunks, - translations, - translator, - tmod, - ); - } + let (mut process, module) = crate::util::find_proc(os).unwrap(); + + vat_test_with_mem( + bench, + as_mut!(process impl VirtualTranslate).unwrap(), + chunks, + translations, + module, + ); } -fn chunk_vat_params< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +fn chunk_vat_params( group: &mut BenchmarkGroup<'_, measurement::WallTime>, func_name: String, - 
cache_size: u64, + cache_size: usize, use_tlb: bool, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, ) { let size = 0x10; + + let mut os = initialize_ctx(cache_size, use_tlb).unwrap(); + for &chunk_size in [1, 4, 16, 64].iter() { group.throughput(Throughput::Elements(chunk_size * size)); group.bench_with_input( BenchmarkId::new(func_name.clone(), chunk_size), &size, |b, &size| { - vat_test_with_ctx( + vat_test_with_os( b, - black_box(cache_size), - black_box(chunk_size as usize), - black_box((size * chunk_size) as usize), - black_box(use_tlb), - initialize_ctx().unwrap(), + black_box(chunk_size.try_into().unwrap()), + black_box((size * chunk_size).try_into().unwrap()), + &mut os, ) }, ); } } -pub fn chunk_vat< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +pub fn chunk_vat( c: &mut Criterion, backend_name: &str, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, + use_caches: bool, ) { let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_chunk_vat", backend_name); + let group_name = format!("{backend_name}_chunk_vat"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); chunk_vat_params( &mut group, - format!("{}_nocache", group_name), + format!("{group_name}_nocache"), 0, false, initialize_ctx, ); - chunk_vat_params( - &mut group, - format!("{}_tlb_nocache", group_name), - 0, - true, - initialize_ctx, - ); - chunk_vat_params( - &mut group, - format!("{}_cache", group_name), - 2, - false, - initialize_ctx, - ); - chunk_vat_params( - &mut group, - format!("{}_tlb_cache", group_name), - 2, - true, - initialize_ctx, - ); + if use_caches { + chunk_vat_params( + &mut group, + format!("{group_name}_tlb_nocache"), + 0, + true, + initialize_ctx, + ); + chunk_vat_params( + &mut group, + 
format!("{group_name}_cache"), + 2, + false, + initialize_ctx, + ); + chunk_vat_params( + &mut group, + format!("{group_name}_tlb_cache"), + 2, + true, + initialize_ctx, + ); + } } diff --git a/apex_dma/memflow_lib/memflow-bench/src/virt.rs b/apex_dma/memflow_lib/memflow-bench/src/virt.rs index 5c95769..987816b 100644 --- a/apex_dma/memflow_lib/memflow-bench/src/virt.rs +++ b/apex_dma/memflow_lib/memflow-bench/src/virt.rs @@ -1,23 +1,22 @@ use criterion::*; -use memflow::mem::{ - CachedMemoryAccess, CachedVirtualTranslate, PhysicalMemory, VirtualDMA, VirtualMemory, - VirtualReadData, VirtualTranslate, -}; +use memflow::mem::MemoryView; -use memflow::architecture::ScopedVirtualTranslate; +use memflow::cglue::*; use memflow::error::Result; -use memflow::process::*; -use memflow::types::*; +use memflow::os::*; +use memflow::plugins::*; use rand::prelude::*; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng as CurRng; -fn rwtest( +use std::convert::TryInto; + +fn rwtest( bench: &mut Bencher, virt_mem: &mut T, - module: &M, + module: &ModuleInfo, chunk_sizes: &[usize], chunk_counts: &[usize], read_size: usize, @@ -28,30 +27,28 @@ fn rwtest( for i in chunk_sizes { for o in chunk_counts { - let mut vbufs = vec![vec![0 as u8; *i]; *o]; + let mut vbufs = vec![vec![0_u8; *i]; *o]; let mut done_size = 0; while done_size < read_size { - let base_addr = rng.gen_range( - module.base().as_u64(), - module.base().as_u64() + module.size() as u64, - ); + let base_addr = + rng.gen_range(module.base.to_umem()..(module.base.to_umem() + module.size)); let mut bufs = Vec::with_capacity(*o); - for VirtualReadData(addr, _) in bufs.iter_mut() { - *addr = (base_addr + rng.gen_range(0, 0x2000)).into(); + for CTup2(addr, _) in bufs.iter_mut() { + *addr = (base_addr + rng.gen_range(0..0x2000)).into(); } bufs.extend(vbufs.iter_mut().map(|vec| { - VirtualReadData( - (base_addr + rng.gen_range(0, 0x2000)).into(), - vec.as_mut_slice(), + CTup2( + (base_addr + 
rng.gen_range(0..0x2000)).into(), + vec.as_mut_slice().into(), ) })); bench.iter(|| { - let _ = black_box(virt_mem.virt_read_raw_list(bufs.as_mut_slice())); + let _ = black_box(virt_mem.read_raw_list(bufs.as_mut_slice())); }); done_size += *i * *o; } @@ -63,12 +60,12 @@ fn rwtest( total_size } -pub fn read_test_with_mem( +pub fn read_test_with_mem( bench: &mut Bencher, virt_mem: &mut T, chunk_size: usize, chunks: usize, - tmod: M, + tmod: ModuleInfo, ) { black_box(rwtest( bench, @@ -80,111 +77,63 @@ pub fn read_test_with_mem( )); } -fn read_test_with_ctx< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +fn read_test_with_os( bench: &mut Bencher, - cache_size: u64, chunk_size: usize, chunks: usize, - use_tlb: bool, - (mut mem, vat, proc, translator, tmod): (T, V, P, S, M), + os: &mut OsInstanceArcBox<'static>, ) { - if cache_size > 0 { - let cache = CachedMemoryAccess::builder(&mut mem) - .arch(proc.sys_arch()) - .cache_size(size::mb(cache_size as usize)) - .page_type_mask(PageType::PAGE_TABLE | PageType::READ_ONLY | PageType::WRITEABLE); - - if use_tlb { - let mem = cache.build().unwrap(); - let vat = CachedVirtualTranslate::builder(vat) - .arch(proc.sys_arch()) - .build() - .unwrap(); - let mut virt_mem = VirtualDMA::with_vat(mem, proc.proc_arch(), translator, vat); - read_test_with_mem(bench, &mut virt_mem, chunk_size, chunks, tmod); - } else { - let mem = cache.build().unwrap(); - let mut virt_mem = VirtualDMA::with_vat(mem, proc.proc_arch(), translator, vat); - read_test_with_mem(bench, &mut virt_mem, chunk_size, chunks, tmod); - } - } else if use_tlb { - let vat = CachedVirtualTranslate::builder(vat) - .arch(proc.sys_arch()) - .build() - .unwrap(); - let mut virt_mem = VirtualDMA::with_vat(mem, proc.proc_arch(), translator, vat); - read_test_with_mem(bench, &mut virt_mem, chunk_size, chunks, tmod); - } else { - let mut virt_mem = VirtualDMA::with_vat(mem, proc.proc_arch(), translator, 
vat); - read_test_with_mem(bench, &mut virt_mem, chunk_size, chunks, tmod); - } + let (mut proc, module) = crate::util::find_proc(os).unwrap(); + read_test_with_mem(bench, &mut proc, chunk_size, chunks, module); } -fn seq_read_params< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +fn seq_read_params( group: &mut BenchmarkGroup<'_, measurement::WallTime>, func_name: String, - cache_size: u64, + cache_size: usize, use_tlb: bool, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, ) { + let mut os = initialize_ctx(cache_size, use_tlb).unwrap(); + for &size in [0x8, 0x10, 0x100, 0x1000, 0x10000].iter() { group.throughput(Throughput::Bytes(size)); group.bench_with_input( BenchmarkId::new(func_name.clone(), size), &size, |b, &size| { - read_test_with_ctx( + read_test_with_os( b, - black_box(cache_size), - black_box(size as usize), + black_box(size.try_into().unwrap()), black_box(1), - black_box(use_tlb), - initialize_ctx().unwrap(), + &mut os, ) }, ); } } -fn chunk_read_params< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +fn chunk_read_params( group: &mut BenchmarkGroup<'_, measurement::WallTime>, func_name: String, - cache_size: u64, + cache_size: usize, use_tlb: bool, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, ) { + let mut os = initialize_ctx(cache_size, use_tlb).unwrap(); + for &size in [0x8, 0x10, 0x100, 0x1000].iter() { for &chunk_size in [1, 4, 16, 64].iter() { group.throughput(Throughput::Bytes(size * chunk_size)); group.bench_with_input( - BenchmarkId::new(format!("{}_s{:x}", func_name, size), size * chunk_size), + BenchmarkId::new(format!("{func_name}_s{size:x}"), size * chunk_size), &size, |b, &size| { - read_test_with_ctx( + read_test_with_os( b, - black_box(cache_size), - black_box(size 
as usize), - black_box(chunk_size as usize), - black_box(use_tlb), - initialize_ctx().unwrap(), + black_box(size.try_into().unwrap()), + black_box(chunk_size.try_into().unwrap()), + &mut os, ) }, ); @@ -192,98 +141,93 @@ fn chunk_read_params< } } -pub fn seq_read< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +pub fn seq_read( c: &mut Criterion, backend_name: &str, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, + use_caches: bool, ) { let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_virt_seq_read", backend_name); + let group_name = format!("{backend_name}_virt_seq_read"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); seq_read_params( &mut group, - format!("{}_nocache", group_name), - 0, - false, - initialize_ctx, - ); - seq_read_params( - &mut group, - format!("{}_tlb_nocache", group_name), + format!("{group_name}_nocache"), 0, - true, - initialize_ctx, - ); - seq_read_params( - &mut group, - format!("{}_cache", group_name), - 2, false, initialize_ctx, ); - seq_read_params( - &mut group, - format!("{}_tlb_cache", group_name), - 2, - true, - initialize_ctx, - ); + if use_caches { + seq_read_params( + &mut group, + format!("{group_name}_tlb_nocache"), + 0, + true, + initialize_ctx, + ); + seq_read_params( + &mut group, + format!("{group_name}_cache"), + 2, + false, + initialize_ctx, + ); + seq_read_params( + &mut group, + format!("{group_name}_tlb_cache"), + 2, + true, + initialize_ctx, + ); + } } -pub fn chunk_read< - T: PhysicalMemory, - V: VirtualTranslate, - P: OsProcessInfo, - S: ScopedVirtualTranslate, - M: OsProcessModuleInfo, ->( +pub fn chunk_read( c: &mut Criterion, backend_name: &str, - initialize_ctx: &dyn Fn() -> Result<(T, V, P, S, M)>, + initialize_ctx: &dyn Fn(usize, bool) -> Result>, + use_caches: bool, ) { 
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let group_name = format!("{}_virt_chunk_read", backend_name); + let group_name = format!("{backend_name}_virt_chunk_read"); let mut group = c.benchmark_group(group_name.clone()); group.plot_config(plot_config); chunk_read_params( &mut group, - format!("{}_nocache", group_name), + format!("{group_name}_nocache"), 0, false, initialize_ctx, ); - chunk_read_params( - &mut group, - format!("{}_tlb_nocache", group_name), - 0, - true, - initialize_ctx, - ); - chunk_read_params( - &mut group, - format!("{}_cache", group_name), - 2, - false, - initialize_ctx, - ); - chunk_read_params( - &mut group, - format!("{}_tlb_cache", group_name), - 2, - true, - initialize_ctx, - ); + + if use_caches { + chunk_read_params( + &mut group, + format!("{group_name}_tlb_nocache"), + 0, + true, + initialize_ctx, + ); + chunk_read_params( + &mut group, + format!("{group_name}_cache"), + 2, + false, + initialize_ctx, + ); + chunk_read_params( + &mut group, + format!("{group_name}_tlb_cache"), + 2, + true, + initialize_ctx, + ); + } } diff --git a/apex_dma/memflow_lib/memflow-bench/vagrant/Vagrantfile b/apex_dma/memflow_lib/memflow-bench/vagrant/Vagrantfile new file mode 100644 index 0000000..d30a007 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-bench/vagrant/Vagrantfile @@ -0,0 +1,39 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.provider :virtualbox do |v| + v.memory = 2048 + v.cpus = 4 + config.vm.synced_folder "../..", "/memflow", create: true, disabled: false + end + config.vm.provider :libvirt do |v| + v.memory = 2048 + v.cpus = 4 + v.qemu_use_session = false + config.vm.synced_folder "../..", "/memflow", type: 'nfs', nfs_udp: false, create: true, disabled: false + end + + config.vm.box = "generic/ubuntu2004" + config.vm.provision :shell, privileged: true, inline: $install_updates + config.vm.provision :shell, privileged: false, inline: 
$install_memflow +end + +$install_updates = <<-SCRIPT +echo "installing updates" +export DEBIAN_FRONTEND=noninteractive +apt-get -qq update +apt-get -qq install git build-essential +SCRIPT + +$install_memflow = <<-SCRIPT +echo "installing rust" +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +source $HOME/.cargo/env + +echo "building memflow" +cd /memflow +cargo build --release --workspace --all-features + +# TODO: install memflow-win32, memflow-coredump, etc +SCRIPT diff --git a/apex_dma/memflow_lib/memflow-derive/Cargo.toml b/apex_dma/memflow_lib/memflow-derive/Cargo.toml index 3b3fb67..ff59076 100644 --- a/apex_dma/memflow_lib/memflow-derive/Cargo.toml +++ b/apex_dma/memflow_lib/memflow-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "memflow-derive" -version = "0.1.5" +version = "0.2.0" authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] edition = "2018" description = "derive macros for the memflow physical memory introspection framework" @@ -8,7 +8,7 @@ documentation = "https://docs.rs/memflow-derive" readme = "README.md" homepage = "https://memflow.github.io" repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" +license = "MIT" keywords = [ "memflow", "introspection", "memory", "dma" ] categories = [ "memory-management", "os" ] @@ -19,10 +19,9 @@ maintenance = { status = "actively-developed" } proc-macro = true [dependencies] -proc-macro2 = "1.0" -syn = "1.0" -quote = "1.0" -darling = "0.10" +proc-macro2 = "^1.0" +syn = "^2.0" +quote = "^1.0" +darling = "^0.20" +proc-macro-crate = "^2.0" -[dev-dependencies] -memflow = { version = "0.1", path = "../memflow" } diff --git a/apex_dma/memflow_lib/memflow-derive/src/lib.rs b/apex_dma/memflow_lib/memflow-derive/src/lib.rs index a4d9d0b..c8f6aa0 100644 --- a/apex_dma/memflow_lib/memflow-derive/src/lib.rs +++ b/apex_dma/memflow_lib/memflow-derive/src/lib.rs @@ -1,69 +1,603 @@ -use darling::FromMeta; +use darling::{ast::NestedMeta, FromMeta}; use 
proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, AttributeArgs, Data, DeriveInput, Fields, ItemFn}; +use proc_macro_crate::*; +use quote::{format_ident, quote}; +use syn::{parse_macro_input, Data, DeriveInput, Fields, ItemFn}; #[derive(Debug, FromMeta)] struct ConnectorFactoryArgs { name: String, #[darling(default)] version: Option, + #[darling(default)] + description: Option, + #[darling(default)] + help_fn: Option, + #[darling(default)] + target_list_fn: Option, + #[darling(default)] + accept_input: bool, + #[darling(default)] + return_wrapped: bool, + #[darling(default)] + no_default_cache: bool, +} + +#[derive(Debug, FromMeta)] +struct OsFactoryArgs { + name: String, + #[darling(default)] + version: Option, + #[darling(default)] + description: Option, + #[darling(default)] + help_fn: Option, + #[darling(default)] + accept_input: bool, + #[darling(default)] + return_wrapped: bool, +} + +fn validate_plugin_name(name: &str) { + if !name + .chars() + .all(|c| char::is_alphanumeric(c) || c == '-' || c == '_') + { + panic!("plugin name must only contain alphanumeric characters"); + } } -// We should add conditional compilation for the crate-type here -// so our rust libraries who use a connector wont export those functions -// again by themselves (e.g. the ffi). -// -// This would also lead to possible duplicated symbols if -// multiple connectors are imported. -// -// See https://github.com/rust-lang/rust/issues/20267 for the tracking issue. -// -// #[cfg(crate_type = "cdylib")] +/// Creates a memflow connector plugin. +/// This function takes care of supplying all necessary underlying structures +/// for exposing a memflow connector plugin in the form of a dylib. +/// +/// Remarks: +/// +/// We should add conditional compilation for the crate-type here +/// so our rust libraries who use a connector wont export those functions +/// again by themselves (e.g. the ffi). 
+/// +/// This would also lead to possible duplicated symbols if +/// multiple connectors are imported. +/// +/// See for the tracking issue. +/// +/// #[cfg(crate_type = "cdylib")] +/// +/// Macro Parameters: +/// +/// `name` - The name of the plugin +/// `version` - The version of the plugin +/// `description` - Short description of the plugin +/// `help_fn` - Name of the function that provides a help text to the user +/// `target_list_fn` - Name of the function that provides a list of all targets to the user +/// `accept_input` - Wether or not this Connector is able to accept an Os-Plugin as an input +/// `return_wrapped` - Wether or not the return value is an already wrapped cglue object or if the macro needs to construct it +/// `no_default_cache` - Disables the default caching behavior if no cache configuration is supplied by the user. +/// +/// Caching: +/// +/// By default the proc macro will call `memflow::plugins::connector::create_instance` internally which will handle the caching functionality. +/// Either the user did not specify any caching, which results in the default caching configuration being used, or the user +/// did choose a custom caching configuration which will override the default caching configuration. +/// +/// In case `no_default_cache` is used the default behavior will be to use no caching. If the user supplies a cache configuration even +/// if `no_default_cache` is set the `memflow::plugins::connector::create_instance` function will still instantiate the requested configuration. +/// +/// In case `return_wrapped` is set to true the caching behavior has to be handled by the end user simply by +/// calling `memflow::plugins::connector::create_instance` with the appropiate arguments. 
+/// +/// Examples: +/// +/// Simple usage: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", version = "1.0.0", description = "Dummy Plugin for Testing purposes")] +/// pub fn create_connector(_args: &ConnectorArgs) -> Result { +/// Ok(DummyMemory::new(size::mb(16))) +/// } +/// ``` +/// +/// Disable default caching: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", no_default_cache = true)] +/// pub fn create_connector(_args: &ConnectorArgs) -> Result { +/// Ok(DummyMemory::new(size::mb(16))) +/// } +/// ``` +/// +/// Custom help function: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", help_fn = "help")] +/// pub fn create_connector(_args: &ConnectorArgs) -> Result { +/// Ok(DummyMemory::new(size::mb(16))) +/// } +/// +/// pub fn help() -> String { +/// "Dummy Plugin for Testing purposes".to_string() +/// } +/// ``` +/// +/// Custom target list function: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// # use std::vec::Vec; +/// #[connector(name = "dummy_conn", target_list_fn = "target_list")] +/// pub fn create_connector(_args: &ConnectorArgs) -> Result { +/// Ok(DummyMemory::new(size::mb(16))) +/// } +/// +/// pub fn target_list() -> Result> { +/// Ok(Vec::new()) +/// } +/// ``` +/// +/// Wrapped return with manually created connector instance: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", return_wrapped = true)] +/// pub fn create_connector( +/// args: &ConnectorArgs, +/// lib: LibArc, +/// ) -> Result> { +/// let connector = DummyMemory::new(size::mb(16)); +/// Ok(memflow::plugins::connector::create_instance(connector, lib, args, false)) +/// } +/// ``` +/// +/// Connector with input parameter: +/// 
```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", accept_input = true)] +/// pub fn create_connector( +/// _args: &ConnectorArgs, +/// _os: Option>, +/// ) -> Result { +/// Ok(DummyMemory::new(size::mb(16))) +/// } +/// ``` +/// +/// Connector with input parameter and manually created connector instance: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[connector(name = "dummy_conn", accept_input = true, return_wrapped = true)] +/// pub fn create_connector<'a>( +/// args: &ConnectorArgs, +/// _os: Option>, +/// lib: LibArc, +/// ) -> Result> { +/// let connector = DummyMemory::new(size::mb(16)); +/// Ok(memflow::plugins::connector::create_instance(connector, lib, args, false)) +/// } +/// ``` #[proc_macro_attribute] pub fn connector(args: TokenStream, input: TokenStream) -> TokenStream { - let attr_args = parse_macro_input!(args as AttributeArgs); + let crate_path = crate_path(); + + let attr_args = match NestedMeta::parse_meta_list(args.into()) { + Ok(v) => v, + Err(e) => return TokenStream::from(darling::Error::from(e).write_errors()), + }; let args = match ConnectorFactoryArgs::from_list(&attr_args) { Ok(v) => v, Err(e) => return TokenStream::from(e.write_errors()), }; let connector_name = args.name; + validate_plugin_name(&connector_name); + + let version_gen = args + .version + .map_or_else(|| quote! { env!("CARGO_PKG_VERSION") }, |v| quote! { #v }); + + let description_gen = args.description.map_or_else( + || quote! { env!("CARGO_PKG_DESCRIPTION") }, + |d| quote! { #d }, + ); + + let help_gen = if args.help_fn.is_some() { + quote! { Some(mf_help_callback) } + } else { + quote! { None } + }; + + let target_list_gen = if args.target_list_fn.is_some() { + quote! { Some(mf_target_list_callback) } + } else { + quote! 
{ None } + }; + + let connector_descriptor: proc_macro2::TokenStream = + ["MEMFLOW_CONNECTOR_", &connector_name.to_uppercase()] + .concat() + .parse() + .unwrap(); let func = parse_macro_input!(input as ItemFn); let func_name = &func.sig.ident; - let gen = quote! { - #[cfg(feature = "inventory")] - #[doc(hidden)] - pub static CONNECTOR_NAME: &str = #connector_name; + let func_accept_input = args.accept_input; + let func_return_wrapped = args.return_wrapped; + + let no_default_cache = args.no_default_cache; - #[cfg(feature = "inventory")] + // create wrapping function according to input/output configuration + #[allow(clippy::collapsible_else_if)] + let create_fn_gen_inner = if func_accept_input { + if !func_return_wrapped { + // args + os + quote! { + #crate_path::plugins::wrap_with_input(args, os.into(), lib, logger, out, |a, os, lib| { + Ok(#crate_path::plugins::connector::create_instance(#func_name(a, os)?, lib, a, #no_default_cache)) + }) + } + } else { + // args + os + lib + quote! { + #crate_path::plugins::wrap_with_input(args, os.into(), lib, logger, out, #func_name) + } + } + } else { + if !func_return_wrapped { + // args + quote! { + #crate_path::plugins::wrap(args, lib, logger, out, |a, lib| { + Ok(#crate_path::plugins::connector::create_instance(#func_name(a)?, lib, a, #no_default_cache)) + }) + } + } else { + // args + lib + quote! { + #crate_path::plugins::wrap(args, lib, logger, out, #func_name) + } + } + }; + + let create_fn_gen = quote! 
{ + #[doc(hidden)] + extern "C" fn mf_create( + args: Option<&#crate_path::plugins::connector::ConnectorArgs>, + os: #crate_path::cglue::option::COption<#crate_path::plugins::os::OsInstanceArcBox<'static>>, + lib: #crate_path::plugins::LibArc, + logger: Option<&'static #crate_path::plugins::PluginLogger>, + out: &mut #crate_path::plugins::connector::MuConnectorInstanceArcBox<'static> + ) -> i32 { + #create_fn_gen_inner + } + }; + + let help_fn_gen = args.help_fn.map(|v| v.parse().unwrap()).map_or_else( + proc_macro2::TokenStream::new, + |func_name: proc_macro2::TokenStream| { + quote! { + #[doc(hidden)] + extern "C" fn mf_help_callback( + mut callback: #crate_path::plugins::HelpCallback, + ) { + let helpstr = #func_name(); + let _ = callback.call(helpstr.into()); + } + } + }, + ); + + let target_list_fn_gen = args.target_list_fn.map(|v| v.parse().unwrap()).map_or_else( + proc_macro2::TokenStream::new, + |func_name: proc_macro2::TokenStream| { + quote! { + #[doc(hidden)] + extern "C" fn mf_target_list_callback( + mut callback: #crate_path::plugins::TargetCallback, + ) -> i32 { + #func_name() + .map(|mut targets| { + targets + .into_iter() + .take_while(|t| callback.call(t.clone())) + .for_each(|_| ()); + }) + .into_int_result() + } + } + }, + ); + + let gen = quote! 
{ #[doc(hidden)] #[no_mangle] - pub static MEMFLOW_CONNECTOR: ::memflow::connector::ConnectorDescriptor = ::memflow::connector::ConnectorDescriptor { - connector_version: ::memflow::connector::MEMFLOW_CONNECTOR_VERSION, - name: CONNECTOR_NAME, - factory: connector_factory, + pub static #connector_descriptor: #crate_path::plugins::ConnectorDescriptor = #crate_path::plugins::ConnectorDescriptor { + plugin_version: #crate_path::plugins::MEMFLOW_PLUGIN_VERSION, + accept_input: #func_accept_input, + input_layout: <<#crate_path::plugins::LoadableConnector as #crate_path::plugins::Loadable>::CInputArg as #crate_path::abi_stable::StableAbi>::LAYOUT, + output_layout: <<#crate_path::plugins::LoadableConnector as #crate_path::plugins::Loadable>::Instance as #crate_path::abi_stable::StableAbi>::LAYOUT, + name: #crate_path::cglue::CSliceRef::from_str(#connector_name), + version: #crate_path::cglue::CSliceRef::from_str(#version_gen), + description: #crate_path::cglue::CSliceRef::from_str(#description_gen), + help_callback: #help_gen, + target_list_callback: #target_list_gen, + create: mf_create, }; - #[cfg(feature = "inventory")] - pub extern "C" fn connector_factory(args: &::memflow::connector::ConnectorArgs) -> ::memflow::error::Result<::memflow::connector::ConnectorType> { - let connector = #func_name(args)?; - Ok(Box::new(connector)) + #create_fn_gen + + #help_fn_gen + + #target_list_fn_gen + + #func + }; + + gen.into() +} + +/// Creates a memflow os plugin. +/// This function takes care of supplying all necessary underlying structures +/// for exposing a memflow os plugin in the form of a dylib. 
+/// +/// Macro Parameters: +/// +/// `name` - The name of the plugin +/// `version` - The version of the plugin +/// `description` - Short description of the plugin +/// `help_fn` - Name of the function that provides a help text to the user +/// `accept_input` - Wether or not this Os-Plugin is able to accept a connector as an input +/// `return_wrapped` - Wether or not the return value is an already wrapped cglue object or if the macro needs to construct it +/// +/// Examples: +/// +/// Simple usage: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[os(name = "dummy_os", version = "1.0.0", description = "Dummy Plugin for Testing purposes")] +/// pub fn create_os( +/// _args: &OsArgs, +/// ) -> Result { +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// Ok(DummyOs::new(phys_mem)) +/// } +/// +/// ``` +/// Custom help function: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[os(name = "dummy_os", help_fn = "help")] +/// pub fn create_os( +/// _args: &OsArgs, +/// ) -> Result { +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// Ok(DummyOs::new(phys_mem)) +/// } +/// +/// pub fn help() -> String { +/// "Dummy Plugin for Testing purposes".to_string() +/// } +/// ``` +/// +/// Wrapped return with manually created os instance: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[os(name = "dummy_os", return_wrapped = true)] +/// pub fn create_os( +/// args: &OsArgs, +/// lib: LibArc, +/// ) -> Result> { +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// let os = DummyOs::new(phys_mem); +/// Ok(memflow::plugins::os::create_instance(os, lib, args)) +/// } +/// ``` +/// +/// Os with input parameter: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[os(name = "dummy_os", accept_input = true)] +/// pub fn create_os( +/// args: &OsArgs, +/// _connector: Option>, +/// ) -> 
Result { +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// Ok(DummyOs::new(phys_mem)) +/// } +/// ``` +/// +/// Os with input parameter and manually created os instance: +/// ```rust,ignore +/// # use ::memflow::prelude::v1::*; +/// # use ::memflow::dummy::*; +/// #[os(name = "dummy_os", accept_input = true, return_wrapped = true)] +/// pub fn create_os( +/// args: &OsArgs, +/// _connector: Option>, +/// lib: LibArc, +/// ) -> Result> { +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// let os = DummyOs::new(phys_mem); +/// Ok(memflow::plugins::os::create_instance(os, lib, args)) +/// } +/// ``` +#[proc_macro_attribute] +pub fn os(args: TokenStream, input: TokenStream) -> TokenStream { + let crate_path = crate_path(); + + let attr_args = match NestedMeta::parse_meta_list(args.into()) { + Ok(v) => v, + Err(e) => return TokenStream::from(darling::Error::from(e).write_errors()), + }; + let args = match OsFactoryArgs::from_list(&attr_args) { + Ok(v) => v, + Err(e) => return TokenStream::from(e.write_errors()), + }; + + let os_name = args.name; + validate_plugin_name(&os_name); + + let version_gen = args + .version + .map_or_else(|| quote! { env!("CARGO_PKG_VERSION") }, |v| quote! { #v }); + + let description_gen = args.description.map_or_else( + || quote! { env!("CARGO_PKG_DESCRIPTION") }, + |d| quote! { #d }, + ); + + let help_gen = if args.help_fn.is_some() { + quote! { Some(mf_help_callback) } + } else { + quote! 
{ None } + }; + + let os_descriptor: proc_macro2::TokenStream = ["MEMFLOW_OS_", &os_name.to_uppercase()] + .concat() + .parse() + .unwrap(); + + let func = parse_macro_input!(input as ItemFn); + let func_name = &func.sig.ident; + + let func_accept_input = args.accept_input; + let func_return_wrapped = args.return_wrapped; + + // create wrapping function according to input/output configuration + #[allow(clippy::collapsible_else_if)] + let create_fn_gen_inner = if func_accept_input { + if !func_return_wrapped { + // inputs: args + connector + quote! { + #crate_path::plugins::wrap_with_input(args, connector.into(), lib, logger, out, |a, os, lib| { + Ok(#crate_path::plugins::os::create_instance(#func_name(a, os)?, lib, a)) + }) + } + } else { + // inputs: args + connector + lib + quote! { + #crate_path::plugins::wrap_with_input(args, connector.into(), lib, logger, out, #func_name) + } } + } else { + if !func_return_wrapped { + // inputs: args + quote! { + #crate_path::plugins::wrap(args, lib, logger, out, |a, lib| { + Ok(#crate_path::plugins::os::create_instance(#func_name(a)?, lib, a)) + }) + } + } else { + // inputs: args + lib + quote! { + #crate_path::plugins::wrap(args, lib, logger, out, #func_name) + } + } + }; - pub fn static_connector_factory(args: &::memflow::connector::ConnectorArgs) -> ::memflow::error::Result { - #func_name(args) + let create_fn_gen = quote! { + #[doc(hidden)] + extern "C" fn mf_create( + args: Option<&#crate_path::plugins::os::OsArgs>, + connector: #crate_path::cglue::COption<#crate_path::plugins::connector::ConnectorInstanceArcBox<'static>>, + lib: #crate_path::plugins::LibArc, + logger: Option<&'static #crate_path::plugins::PluginLogger>, + out: &mut #crate_path::plugins::os::MuOsInstanceArcBox<'static> + ) -> i32 { + #create_fn_gen_inner } + }; + + let help_fn_gen = args.help_fn.map(|v| v.parse().unwrap()).map_or_else( + proc_macro2::TokenStream::new, + |func_name: proc_macro2::TokenStream| { + quote! 
{ + #[doc(hidden)] + extern "C" fn mf_help_callback( + mut callback: #crate_path::plugins::HelpCallback, + ) { + let helpstr = #func_name(); + let _ = callback.call(helpstr.into()); + } + } + }, + ); + + let gen = quote! { + #[doc(hidden)] + #[no_mangle] + pub static #os_descriptor: #crate_path::plugins::os::OsDescriptor = #crate_path::plugins::os::OsDescriptor { + plugin_version: #crate_path::plugins::MEMFLOW_PLUGIN_VERSION, + accept_input: #func_accept_input, + input_layout: <<#crate_path::plugins::os::LoadableOs as #crate_path::plugins::Loadable>::CInputArg as #crate_path::abi_stable::StableAbi>::LAYOUT, + output_layout: <<#crate_path::plugins::os::LoadableOs as #crate_path::plugins::Loadable>::Instance as #crate_path::abi_stable::StableAbi>::LAYOUT, + name: #crate_path::cglue::CSliceRef::from_str(#os_name), + version: #crate_path::cglue::CSliceRef::from_str(#version_gen), + description: #crate_path::cglue::CSliceRef::from_str(#description_gen), + help_callback: #help_gen, + target_list_callback: None, // non existent on Os Plugins + create: mf_create, + }; + + #create_fn_gen + + #help_fn_gen #func }; + gen.into() } +/// Auto derive the `Pod` trait for structs. +/// +/// The type is checked for requirements of the `Pod` trait: +/// +/// * Be annotated with `repr(C)` or `repr(transparent)`. +/// +/// * Have every field's type implement `Pod` itself. +/// +/// * Not have any padding between its fields. +/// +/// # Compile errors +/// +/// Error reporting is not very ergonomic due to how errors are detected: +/// +/// * `error[E0277]: the trait bound $TYPE: Pod is not satisfied` +/// +/// The struct contains a field whose type does not implement `Pod`. +/// +/// * `error[E0512]: cannot transmute between types of different sizes, or dependently-sized types` +/// +/// This error means your struct has padding as its size is not equal to a byte array of length equal to the sum of the size of its fields. 
+/// +/// * `error: no rules expected the token <` +/// +/// The struct contains generic parameters which are not supported. It may still be possible to manually implement `Pod` but extra care should be taken to ensure its invariants are upheld. +/// +/// # Remarks: +/// This custom derive macro is required because the dataview proc macro searches for ::dataview::derive_pod!(). +/// See for the original implementation. +#[proc_macro_derive(Pod)] +pub fn pod_derive(input: TokenStream) -> TokenStream { + let crate_path = crate_path(); + + format!("{crate_path}::dataview::derive_pod!{{ {input} }}") + .parse() + .unwrap() +} + #[proc_macro_derive(ByteSwap)] pub fn byteswap_derive(input: TokenStream) -> TokenStream { + let crate_path = crate_path(); + let input = parse_macro_input!(input as DeriveInput); let name = &input.ident; let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); @@ -85,7 +619,7 @@ pub fn byteswap_derive(input: TokenStream) -> TokenStream { }; let gen = quote!( - impl #impl_generics ::memflow::types::byte_swap::ByteSwap for #name #ty_generics #where_clause { + impl #impl_generics #crate_path::types::byte_swap::ByteSwap for #name #ty_generics #where_clause { fn byte_swap(&mut self) { #gen_inner } @@ -94,3 +628,34 @@ pub fn byteswap_derive(input: TokenStream) -> TokenStream { gen.into() } + +fn crate_path() -> proc_macro2::TokenStream { + let (col, ident) = crate_path_ident(); + quote!(#col #ident) +} + +fn crate_path_ident() -> (Option, proc_macro2::Ident) { + match crate_path_fixed() { + FoundCrate::Itself => (None, format_ident!("crate")), + FoundCrate::Name(name) => (Some(Default::default()), format_ident!("{}", name)), + } +} + +fn crate_path_fixed() -> FoundCrate { + let found_crate = crate_name("memflow").expect("memflow found in `Cargo.toml`"); + + match found_crate { + FoundCrate::Itself => { + let has_doc_env = std::env::vars().any(|(k, _)| { + k == "UNSTABLE_RUSTDOC_TEST_LINE" || k == "UNSTABLE_RUSTDOC_TEST_PATH" + 
}); + + if has_doc_env { + FoundCrate::Name("memflow".to_string()) + } else { + FoundCrate::Itself + } + } + x => x, + } +} diff --git a/apex_dma/memflow_lib/memflow-derive/tests/derive_test.rs b/apex_dma/memflow_lib/memflow-derive/tests/derive_test.rs deleted file mode 100644 index 86165b7..0000000 --- a/apex_dma/memflow_lib/memflow-derive/tests/derive_test.rs +++ /dev/null @@ -1,38 +0,0 @@ -use memflow::types::byte_swap::ByteSwap; -use memflow_derive::*; - -#[derive(ByteSwap)] -struct ByteSwapDerive { - pub val: u32, -} - -#[derive(ByteSwap)] -struct ByteSwapDeriveGeneric { - pub val: T, -} - -#[derive(ByteSwap)] -struct ByteSwapDeriveWhere -where - T: ByteSwap, -{ - pub val: T, -} - -#[derive(ByteSwap)] -struct ByteSwapDeriveSlice { - pub slice: [u8; 32], -} - -#[derive(ByteSwap)] -struct ByteSwapDeriveStructSlice { - pub slice: [ByteSwapDeriveSlice; 128], -} - -#[derive(ByteSwap)] -struct ByteSwapDeriveStructGenericSlice { - pub slice: [ByteSwapDeriveGeneric; 128], -} - -#[test] -pub fn compiles() {} diff --git a/apex_dma/memflow_lib/memflow-ffi/Cargo.toml b/apex_dma/memflow_lib/memflow-ffi/Cargo.toml index d7863f1..962d333 100644 --- a/apex_dma/memflow_lib/memflow-ffi/Cargo.toml +++ b/apex_dma/memflow_lib/memflow-ffi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "memflow-ffi" -version = "0.1.5" +version = "0.2.0" authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] edition = "2018" description = "C bindings for the memflow physical memory introspection framework" @@ -8,7 +8,7 @@ documentation = "https://docs.rs/memflow-ffi" readme = "README.md" homepage = "https://memflow.github.io" repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" +license = "MIT" keywords = [ "memflow", "introspection", "memory", "dma" ] categories = [ "api-bindings", "memory-management", "os" ] @@ -21,9 +21,9 @@ name = "memflow_ffi" crate-type = ["lib", "cdylib", "staticlib"] [dependencies] -memflow = { version = "0.1", path = "../memflow" } -log = "0.4" 
-simple_logger = "1.9" +memflow = { version = "0.2", path = "../memflow" } +log = "^0.4.14" +simplelog = "^0.12.0" [features] default = [] diff --git a/apex_dma/memflow_lib/memflow-ffi/README.md b/apex_dma/memflow_lib/memflow-ffi/README.md index b91e52e..69f0929 100644 --- a/apex_dma/memflow_lib/memflow-ffi/README.md +++ b/apex_dma/memflow_lib/memflow-ffi/README.md @@ -46,4 +46,4 @@ int main(int argc, char *argv[]) { } ``` -Additional examples can be found in the `examples` folder as well as in the [memflow-win32-ffi](https://github.com/memflow/memflow/memflow-win32-ffi) crate. +Additional examples can be found in the `examples` folder. diff --git a/apex_dma/memflow_lib/memflow-ffi/bindgen.sh b/apex_dma/memflow_lib/memflow-ffi/bindgen.sh index ed04e66..5ea08d4 100644 --- a/apex_dma/memflow_lib/memflow-ffi/bindgen.sh +++ b/apex_dma/memflow_lib/memflow-ffi/bindgen.sh @@ -1,3 +1,23 @@ -#!/bin/bash -cargo build --release --workspace -cbindgen --config cbindgen.toml --crate memflow-ffi --output memflow.h +#!/usr/bin/env bash + +run_twice() { + echo $@ + + v=$(exec $@) + + if [ $? -ne 0 ]; then + echo "Error occured! Maybe a fluke. Retrying..." 
+ v=$(exec $@) + fi +} + +# remove any RUSTC_WRAPPER like sccache which might cause issues with cglue-bindgen +export RUSTC_WRAPPER="" + +# update cglue-bindgen +cargo +nightly install cbindgen +cargo +nightly install cglue-bindgen + +# generate c and cpp bindings +run_twice rustup run nightly cglue-bindgen +nightly -c cglue.toml -- --config cbindgen.toml --crate memflow-ffi --output memflow.h -l C +run_twice rustup run nightly cglue-bindgen +nightly -c cglue.toml -- --config cbindgen.toml --crate memflow-ffi --output memflow.hpp -l C++ diff --git a/apex_dma/memflow_lib/memflow-ffi/cbindgen.toml b/apex_dma/memflow_lib/memflow-ffi/cbindgen.toml index ebc3b22..d29973d 100644 --- a/apex_dma/memflow_lib/memflow-ffi/cbindgen.toml +++ b/apex_dma/memflow_lib/memflow-ffi/cbindgen.toml @@ -7,13 +7,26 @@ style = "both" #no_includes = true cpp_compat = true +after_includes = "typedef void *Library;" + [parse] parse_deps = true - include = ["memflow"] +[parse.expand] +crates = ["cglue", "memflow", "memflow-ffi", "log"] + [macro_expansion] bitflags = true [fn] sort_by = "None" + +[export] +include = ["OsInstanceArcBox", "ProcessInstanceArcBox", "IntoProcessInstanceArcBox", "MemoryViewArcBox"] + +[export.rename] +"OptionMut_c_void" = "pvoid" + +[enum] +prefix_with_name = true diff --git a/apex_dma/memflow_lib/memflow-ffi/cglue.toml b/apex_dma/memflow_lib/memflow-ffi/cglue.toml new file mode 100644 index 0000000..75395a4 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/cglue.toml @@ -0,0 +1,3 @@ +default_container = "Box" +default_context = "Arc" +function_prefix = "mf" diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/Makefile b/apex_dma/memflow_lib/memflow-ffi/examples/Makefile deleted file mode 100644 index ec39061..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/examples/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -CC =g++ -CFLAGS =-I../ -I../../memflow-ffi/ -L../../target/release -LIBS=-lm -Wl,--no-as-needed -ldl -lpthread -l:libmemflow_win32_ffi.a - -ODIR=./ - -%.o: %.c 
$(DEPS) - $(CC) -c -o $@ $< $(CFLAGS) - -phys_mem.out: phys_mem.o - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) - -.PHONY: all -all: phys_mem.out - -.DEFAULT_GOAL := all - -clean: - rm -f $(ODIR)/*.o diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/.clang-format b/apex_dma/memflow_lib/memflow-ffi/examples/c/.clang-format new file mode 100644 index 0000000..c55cf80 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/.clang-format @@ -0,0 +1,2 @@ +--- +BasedOnStyle: LLVM \ No newline at end of file diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/Makefile b/apex_dma/memflow_lib/memflow-ffi/examples/c/Makefile new file mode 100644 index 0000000..1534fcc --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/Makefile @@ -0,0 +1,31 @@ +CC = clang +CFLAGS = -g -O0 -I../../../memflow-ffi/ -L../../../target/release +LIBS=-lm -ldl -lpthread -l:libmemflow_ffi.a + +ODIR=./ + +%.o: %.c $(DEPS) + $(CC) -c -o $@ $< $(CFLAGS) + +phys_mem.out: phys_mem.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +process_list.out: process_list.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +module_list.out: module_list.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +module_dump.out: module_dump.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +find_process.out: find_process.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +.PHONY: all +all: phys_mem.out process_list.out module_list.out module_dump.out find_process.out + +.DEFAULT_GOAL := all + +clean: + rm -f $(ODIR)/*.o diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/find_process.c b/apex_dma/memflow_lib/memflow-ffi/examples/c/find_process.c new file mode 100644 index 0000000..dcd9793 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/find_process.c @@ -0,0 +1,210 @@ +#include "memflow.h" + +#include +#include +#include + +ProcessInstance target_process; +ProcessInstance target_process2; + +struct FindProcessContext { + OsInstance *os; + const char *name; + ProcessInstance *target_process; + bool found; +}; + +bool find_process(struct 
FindProcessContext *find_context, Address addr) { + + if (find_context->found) { + return false; + } + + if (mf_osinstance_process_by_address(find_context->os, addr, + find_context->target_process)) { + return true; + } + + const struct ProcessInfo *info = + mf_processinstance_info(find_context->target_process); + + if (!strcmp(info->name, find_context->name)) { + // abort iteration + find_context->found = true; + return false; + } + + mf_processinstance_drop(*find_context->target_process); + + // continue iteration + return true; +} + +pthread_mutex_t lock; + +void *readmem(void *vargp) { + // Store the value argument passed to this thread + int *myid = (int *)vargp; + + uint8_t buffer[0x8]; + + while (true) { + pthread_mutex_lock(&lock); + mf_processinstance_read_raw_into(&target_process, 0x7FF72AF30000, + MUT_SLICE(u8, buffer, sizeof(buffer))); + + printf("TID: %d, Read: %lx\n", *myid, *(uint64_t *)buffer); + pthread_mutex_unlock(&lock); + } + return 0; +} + +int main(int argc, char *argv[]) { + + int ret = 0; + + // enable info level logging + log_init(3); + + // load all available plugins + Inventory *inventory = inventory_scan(); + printf("inventory initialized: %p\n", inventory); + + const char *conn_name = argc > 1 ? argv[1] : "kvm"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + const char *os_name = argc > 3 ? argv[3] : "win32"; + const char *os_arg = argc > 4 ? argv[4] : ""; + const char *target_proc = argc > 5 ? argv[5] : "notepad.exe"; + + ConnectorInstance connector, *conn = conn_name[0] ? 
&connector : NULL; + + // initialize the connector plugin + if (conn) { + if (inventory_create_connector(inventory, conn_name, conn_arg, conn)) { + log_error("unable to initialize connector"); + inventory_free(inventory); + return 1; + } + + printf("connector initialized: %p\n", + connector.container.instance.instance); + } + + // initialize the OS plugin + OsInstance os; + if (inventory_create_os(inventory, os_name, os_arg, conn, &os)) { + log_error("unable to initialize os plugin"); + inventory_free(inventory); + return 1; + } + + printf("os plugin initialized: %p\n", os.container.instance.instance); + + // find a specific process based on it's name. + // this can easily be replaced by process_by_name but + // is being used here as a demonstration. + + /* struct FindProcessContext find_context = { + &os, + target_proc, + &target_process, + false, + }; + + mf_osinstance_process_address_list_callback( + &os, CALLBACK(Address, &find_context, find_process)); + + if (find_context.found) { + const struct ProcessInfo *info = mf_processinstance_info(&target_process); + + printf("%s process found: 0x%lx] %d %s %s\n", target_proc, info->address, + info->pid, info->name, info->path); + + mf_processinstance_drop(target_process); + } else { + printf("Unable to find %s\n", target_proc); + }*/ + + // find a specific process based on its name + // via process_by_name + if (!(ret = mf_osinstance_process_by_name(&os, STR(target_proc), + &target_process))) { + const struct ProcessInfo *info = mf_processinstance_info(&target_process); + + printf("%s process found: 0x%lx] %d %s %s\n", target_proc, info->address, + info->pid, info->name, info->path); + + ModuleInfo module_info; + mf_processinstance_module_by_name(&target_process, STR(target_proc), + &module_info); + printf("0x%lx\n", module_info.base); + + /*ConnectorInstance conn2; + if (inventory_create_connector(inventory, conn_name, conn_arg, &conn2)) { + log_error("unable to initialize connector"); + inventory_free(inventory); + 
return 1; +} + +ConnectorInstance cloned = mf_connectorinstance_clone(&conn2); + printf("ok1\n"); + +mf_connectorinstance_drop(cloned); + printf("ok2\n");*/ + + // MemoryView phys_view = mf_connectorinstance_phys_view(&conn2); + + /*if (pthread_mutex_init(&lock, NULL) != 0) { + printf("\n mutex init has failed\n"); + return 1; + } + + pthread_t tid; + pthread_t tid2; + pthread_create(&tid, NULL, readmem, (void *)&tid); + pthread_create(&tid2, NULL, readmem, (void *)&tid2); + + pthread_join(tid, NULL); + pthread_join(tid2, NULL); + + pthread_mutex_destroy(&lock);*/ + + if (!(ret = mf_osinstance_process_by_name(&os, STR("notepad++.exe"), + &target_process2))) { + const struct ProcessInfo *info2 = + mf_processinstance_info(&target_process2); + + printf("%s process found: 0x%lx] %d %s %s\n", "notepad++.exe", + info2->address, info2->pid, info2->name, info2->path); + + uint8_t buffer[0x8]; + + mf_processinstance_read_raw_into(&target_process, info->address + 0x520, + MUT_SLICE(u8, buffer, sizeof(buffer))); + + printf("Read: %lx\n", *(uint64_t *)buffer); + + mf_processinstance_read_raw_into(&target_process2, info2->address + 0x520, + MUT_SLICE(u8, buffer, sizeof(buffer))); + + printf("Read2: %lx\n", *(uint64_t *)buffer); + } else { + printf("error notepad++"); + } + + mf_processinstance_drop(target_process); + } else { + printf("Unable to find %s\n", target_proc); + log_debug_errorcode(ret); + } + + // This will also free the connector here + // as it was _moved_ into the os by `inventory_create_os` + mf_osinstance_drop(os); + log_info("os plugin/connector freed"); + + inventory_free(inventory); + log_info("inventory freed"); + + return 0; +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/module_dump.c b/apex_dma/memflow_lib/memflow-ffi/examples/c/module_dump.c new file mode 100644 index 0000000..a18b3b4 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/module_dump.c @@ -0,0 +1,126 @@ +/** + +This example demonstrates how to read the contents of a 
module from a process. + +To read from a specific module the following steps have to be done: + + - Create an inventory and let it search for plugins in the system + - Load the plugins to access physical memory and the operating system + (by default the `qemu` plugin and `win32` plugin are being used) + - Find the process by the specified name + - Find the module_info for the given module in the process + - Allocate a buffer which will fit the entire module + - Read the entire module into the buffer and ignore partial read errors + - Write the contents of the retrieved buffer to the specified output location + + +Usage: + + ./module_dump.out kvm :: win32 :: notepad.exe notepad.exe notepad.exe.bin + +*/ +#include "memflow.h" + +#include +#include +#include + +int main(int argc, char *argv[]) { + + int ret = 0; + + // enable info level logging + log_init(4); + + // load all available plugins + Inventory *inventory = inventory_scan(); + printf("inventory initialized: %p\n", inventory); + + const char *conn_name = argc > 1 ? argv[1] : "qemu"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + const char *os_name = argc > 3 ? argv[3]: "win32"; + const char *os_arg = argc > 4? argv[4]: ""; + const char *target_proc = argc > 5? argv[5]: "notepad.exe"; + const char *target_module = argc > 6? argv[6]: "notepad.exe"; + const char *output_file = argc > 7? argv[7]: "notepad.exe.bin"; + + ConnectorInstance connector, *conn = conn_name[0] ? 
&connector : NULL; + + // initialize the connector plugin + if (conn) { + if (inventory_create_connector(inventory, conn_name, conn_arg, conn)) { + log_error("unable to initialize connector"); + inventory_free(inventory); + return 1; + } + + printf("connector initialized: %p\n", connector.container.instance.instance); + } + + // initialize the OS plugin + OsInstance os; + if (inventory_create_os(inventory, os_name, os_arg, conn, &os)) { + log_error("unable to initialize os plugin"); + inventory_free(inventory); + return 1; + } + + printf("os plugin initialized: %p\n", os.container.instance.instance); + + // find a specific process based on its name via process_by_name + ProcessInstance target_process; + if (!(ret = mf_osinstance_process_by_name(&os, STR(target_proc), &target_process))) { + const struct ProcessInfo *info = mf_processinstance_info(&target_process); + + printf("%s process found: 0x%lx] %d %s %s\n", target_proc, info->address, + info->pid, info->name, info->path); + + // find the module by its name + ModuleInfo module_info; + if (!(ret = mf_processinstance_module_by_name(&target_process, STR(target_module), &module_info))) { + printf("%s module found: 0x%lx] 0x%lx %s %s\n", target_proc, module_info.address, + module_info.base, module_info.name, module_info.path); + + // read module into buffer, in this case -2 / -3 are partial read/write errors + void *module_buffer = malloc(module_info.size); + ret = mf_processinstance_read_raw_into(&target_process, module_info.base, MUT_SLICE(u8, module_buffer, module_info.size)); + if (ret == -2) { + printf("%s warning: %s] module only read partially\n", target_proc, target_module); + } + + // module has been read + printf("%s read module: %s] read 0x%lx bytes\n", target_proc, target_module, module_info.size); + + // write the buffer to the specified location + FILE *file = fopen(output_file, "wb"); + if (file) { + fwrite(module_buffer, module_info.size, 1, file); + fclose(file); + printf("dumped 0x%lx bytes to 
%s\n", module_info.size, output_file); + } else { + printf("unable to open output file %s: %s\n", output_file, strerror(errno)); + } + + free(module_buffer); + } else { + printf("unable to find module: %s\n", target_module); + log_debug_errorcode(ret); + } + + // cleanup the processinstance + mf_processinstance_drop(target_process); + } else { + printf("unable to find process: %s\n", target_proc); + log_debug_errorcode(ret); + } + + // This will also free the connector here + // as it was _moved_ into the os by `inventory_create_os` + mf_osinstance_drop(os); + log_info("os plugin/connector freed"); + + inventory_free(inventory); + log_info("inventory freed"); + + return 0; +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/module_list.c b/apex_dma/memflow_lib/memflow-ffi/examples/c/module_list.c new file mode 100644 index 0000000..88d4197 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/module_list.c @@ -0,0 +1,80 @@ +#include "memflow.h" + +#include +#include + +int main(int argc, char *argv[]) { + + int ret = 0; + + // enable info level logging + log_init(3); + + // load all available plugins + Inventory *inventory = inventory_scan(); + printf("inventory initialized: %p\n", inventory); + + const char *conn_name = argc > 1 ? argv[1] : "kvm"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + const char *os_name = argc > 3 ? argv[3]: "win32"; + const char *os_arg = argc > 4? argv[4]: ""; + const char *target_proc = argc > 5? argv[5]: "notepad.exe"; + + ConnectorInstance connector, *conn = conn_name[0] ? 
&connector : NULL; + + // initialize the connector plugin + if (conn) { + if (inventory_create_connector(inventory, conn_name, conn_arg, conn)) { + log_error("unable to initialize connector"); + inventory_free(inventory); + return 1; + } + + printf("connector initialized: %p\n", connector.container.instance.instance); + } + + // initialize the OS plugin + OsInstance os; + if (inventory_create_os(inventory, os_name, os_arg, conn, &os)) { + log_error("unable to initialize os plugin"); + inventory_free(inventory); + return 1; + } + + printf("os plugin initialized: %p\n", os.container.instance.instance); + + // find a specific process based on its name via process_by_name + ProcessInstance target_process; + if (!(ret = mf_osinstance_process_by_name(&os, STR(target_proc), &target_process))) { + const struct ProcessInfo *info = mf_processinstance_info(&target_process); + + printf("%s process found: 0x%lx] %d %s %s\n", target_proc, info->address, + info->pid, info->name, info->path); + + // iterate over all module info structs and collect them in a buffer + COLLECT_CB(ModuleInfo, module_info); + mf_processinstance_module_list_callback(&target_process, NULL, module_info); + for (size_t i = 0; i < module_info_base.size; i++) { + ModuleInfo *module_info = &((ModuleInfo *)module_info_base.buf)[i]; + printf("%s module found: 0x%lx] 0x%lx %s %s\n", target_proc, module_info->address, + module_info->base, module_info->name, module_info->path); + } + free(module_info_base.buf); + + // cleanup the processinstance + mf_processinstance_drop(target_process); + } else { + printf("Unable to find %s\n", target_proc); + log_debug_errorcode(ret); + } + + // This will also free the connector here + // as it was _moved_ into the os by `inventory_create_os` + mf_osinstance_drop(os); + log_info("os plugin/connector freed"); + + inventory_free(inventory); + log_info("inventory freed"); + + return 0; +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/phys_mem.c 
b/apex_dma/memflow_lib/memflow-ffi/examples/c/phys_mem.c new file mode 100644 index 0000000..aee0415 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/phys_mem.c @@ -0,0 +1,46 @@ +#include "memflow.h" + +#include + +int main(int argc, char *argv[]) { + // enable debug level logging + log_init(3); + + Inventory *inv = inventory_scan(); + printf("inv: %p\n", inv); + + const char *conn_name = argc > 1 ? argv[1] : "kvm"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + + ConnectorInstance conn; + if (!inventory_create_connector(inv, conn_name, conn_arg, &conn)) { + for (int i = 0; i < 1000 * 1000; i++) { + uint8_t buffer[0x1000]; + + ConnectorInstance cloned = mf_connectorinstance_clone(&conn); + + mf_connectorinstance_drop(cloned); + + MemoryView phys_view = mf_connectorinstance_phys_view(&conn); + + // regular read_into + mf_read_raw_into(&phys_view, 0x1000 + i, MUT_SLICE(u8, buffer, sizeof(buffer))); + + // read multiple + ReadData read_data = {0x1000 + i, {buffer, sizeof(buffer)}}; + mf_read_raw_list(&phys_view, MUT_SLICE(ReadData, &read_data, 1)); + + printf("Read: %lx\n", *(uint64_t *)buffer); + + mf_memoryview_drop(phys_view); + } + + mf_connectorinstance_drop(conn); + printf("conn dropped!\n"); + } + + inventory_free(inv); + printf("inv freed!\n"); + + return 0; +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/c/process_list.c b/apex_dma/memflow_lib/memflow-ffi/examples/c/process_list.c new file mode 100644 index 0000000..15063ec --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/c/process_list.c @@ -0,0 +1,125 @@ +#include "memflow.h" + +#include + +bool list_processes(OsInstance *os, Address addr) { + + int ret; + + ProcessInstance process; + if ((ret = mf_osinstance_process_by_address(os, addr, &process))) { + log_debug_errorcode(ret); + return true; + } + + const struct ProcessInfo *info = mf_processinstance_info(&process); + + ModuleInfo primary_module; + if ((ret = mf_processinstance_primary_module(&process, 
&primary_module))) { + // no primary module found, continue iteration - this should _never_ happen + printf("%d\t%s\t0x%lx\tN/A\n", info->pid, info->name, info->address); + log_debug_errorcode(ret); + return true; + } + + printf("%d\t%s\t0x%lx\t0x%lx\n", info->pid, info->name, info->address, + primary_module.address); + + // iterate over all module addresses and collect them in an array + struct ModuleAddressInfo module_addresses[256]; + COLLECT_CB_INTO_ARR(ModuleAddressInfo, module_address, module_addresses); + mf_processinstance_module_address_list_callback(&process, NULL, module_address); + + printf("Read %zu modules\n", module_address_base.size); + + // iterate over all module info structs and collect them in a buffer + COLLECT_CB(ModuleInfo, module_info); + mf_processinstance_module_list_callback(&process, NULL, module_info); + printf("Read %zu modules\n", module_info_base.size); + free(module_info_base.buf); + + // iterate over all imports and collect them in a buffer + COLLECT_CB(ImportInfo, import_info); + mf_processinstance_module_import_list_callback(&process, &primary_module, import_info); + printf("Read %zu imports\n", import_info_base.size); + free(import_info_base.buf); + + // iterate over all exports and collect them in a buffer + COLLECT_CB(ExportInfo, exports); + mf_processinstance_module_export_list_callback(&process, &primary_module, exports); + printf("Read %zu exports\n", exports_base.size); + free(exports_base.buf); + + // iterate over all sections and collect them in a buffer + COLLECT_CB(SectionInfo, sections); + mf_processinstance_module_section_list_callback(&process, &primary_module, sections); + printf("Read %zu sections\n", sections_base.size); + free(sections_base.buf); + + mf_processinstance_drop(process); + + return true; +} + +int main(int argc, char *argv[]) { + // enable debug level logging + log_init(2); + + // load all available plugins + Inventory *inventory = inventory_scan(); + + printf("inventory initialized: %p\n", 
inventory); + + const char *conn_name = argc > 1 ? argv[1] : "kvm"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + const char *os_name = argc > 3 ? argv[3]: "win32"; + const char *os_arg = argc > 4? argv[4]: ""; + + ConnectorInstance connector, *conn = conn_name[0] ? &connector : NULL; + + // initialize the connector plugin + if (conn) { + if (inventory_create_connector(inventory, conn_name, conn_arg, conn)) { + printf("unable to initialize connector\n"); + inventory_free(inventory); + return 1; + } + + printf("connector initialized: %p\n", connector.container.instance.instance); + } + + // initialize the OS plugin + OsInstance os; + if (inventory_create_os(inventory, os_name, os_arg, conn, &os)) { + printf("unable to initialize os plugin\n"); + inventory_free(inventory); + return 1; + } + + printf("os plugin initialized: %p\n", os.container.instance.instance); + + // iterate over all processes and print them manually + printf("Pid\tNAME\tADDRESS\tMAIN_MODULE\n"); + mf_osinstance_process_address_list_callback(&os, CALLBACK(Address, &os, list_processes)); + + // count all processes + COUNT_CB(Address, process_address); + mf_osinstance_process_address_list_callback(&os, process_address); + printf("Counted %zu processes\n", process_address_count); + + // iterate over all process info structs and collect them in an array + struct ProcessInfo process_info[256]; + COLLECT_CB_INTO_ARR(ProcessInfo, process_info_cb, process_info); + mf_osinstance_process_info_list_callback(&os, process_info_cb); + printf("Read %zu process infos\n", process_info_cb_base.size); + + // This will also free the connector here + // as it was _moved_ into the os by `inventory_create_os` + mf_osinstance_drop(os); + printf("os plugin/connector freed\n"); + + inventory_free(inventory); + printf("inventory freed\n"); + + return 0; +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/cpp/Makefile b/apex_dma/memflow_lib/memflow-ffi/examples/cpp/Makefile new file mode 100644 index 
0000000..7f53420 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/cpp/Makefile @@ -0,0 +1,19 @@ +CC = g++ +CFLAGS = -g -O0 -pedantic -std=c++14 -I../../../memflow-ffi/ -L../../../target/release +LIBS=-lm -ldl -lpthread -l:libmemflow_ffi.a + +ODIR=./ + +%.o: %.cpp $(DEPS) + $(CC) -c -o $@ $< $(CFLAGS) + +plist.out: plist.o + $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + +.PHONY: all +all: plist.out + +.DEFAULT_GOAL := all + +clean: + rm -f $(ODIR)/*.o diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/cpp/plist.cpp b/apex_dma/memflow_lib/memflow-ffi/examples/cpp/plist.cpp new file mode 100644 index 0000000..17aec14 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/examples/cpp/plist.cpp @@ -0,0 +1,104 @@ +#include "memflow.hpp" +#include +#include +#include +#include + +void fmt_arch(char *arch, int n, ArchitectureIdent ident); + +int main(int argc, char *argv[], const int t[]) +{ + log_init(LevelFilter::LevelFilter_Info); + int arr[] = {1, 2, 3, 4, 5}; + int b=3; + //CSliceMut((char*)arr); + CSliceRef((char*)t, sizeof(int)); + + /*Inventory *inventory = inventory_scan(); + + if (!inventory) + { + log_error("unable to create inventory"); + return 1; + } + + printf("inventory initialized: %p\n", inventory); + + const char *conn_name = argc > 1 ? argv[1] : "kvm"; + const char *conn_arg = argc > 2 ? argv[2] : ""; + const char *os_name = argc > 3 ? argv[3] : "win32"; + const char *os_arg = argc > 4 ? argv[4] : ""; + + // ConnectorInstance<> connector, *conn = conn_name[0] ? 
&connector : nullptr; + + std::unique_ptr> connector = nullptr; + + if (!connector) + { + connector = std::make_unique>(); + if (inventory_create_connector(inventory, conn_name, conn_arg, connector.get())) + { + printf("unable to initialize connector\n"); + inventory_free(inventory); + return 1; + } + + printf("connector initialized: %p\n", connector->container.instance.instance); + } + + OsInstance<> os; + + if (inventory_create_os(inventory, os_name, os_arg, connector.get(), &os)) + { + printf("unable to initialize OS\n"); + inventory_free(inventory); + return 1; + } + + inventory_free(inventory); + + printf("os initialized: %p\n", os.container.instance.instance); + + auto info = os.info(); + char arch[11]; + fmt_arch(arch, sizeof(arch), info->arch); + + printf("Kernel base: %llx\nKernel size: %llx\nArchitecture: %s\n", info->base, info->size, arch); + + printf("Process List:\n"); + + printf("%-4s | %-8s | %-10s | %-10s | %s\n", "Seq", "Pid", "Sys Arch", "Proc Arch", "Name"); + + int i = 0; + + os.process_info_list_callback([&i](ProcessInfo info) + { + char sys_arch[11]; + char proc_arch[11]; + + fmt_arch(sys_arch, sizeof(sys_arch), info.sys_arch); + fmt_arch(proc_arch, sizeof(proc_arch), info.proc_arch); + + printf("%-4d | %-8d | %-10s | %-10s | %s\n", i++, info.pid, sys_arch, proc_arch, info.name); + + return true; }); + + */ + + return 0; +} + +void fmt_arch(char *arch, int n, ArchitectureIdent ident) +{ + switch (ident.tag) + { + case ArchitectureIdent::Tag::ArchitectureIdent_X86: + snprintf(arch, n, "X86_%d", ident.x86._0); + break; + case ArchitectureIdent::Tag::ArchitectureIdent_AArch64: + snprintf(arch, n, "AArch64"); + break; + default: + snprintf(arch, n, "Unknown"); + } +} diff --git a/apex_dma/memflow_lib/memflow-ffi/examples/phys_mem.c b/apex_dma/memflow_lib/memflow-ffi/examples/phys_mem.c deleted file mode 100644 index fb81a8a..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/examples/phys_mem.c +++ /dev/null @@ -1,35 +0,0 @@ -#include "memflow.h" 
-#include - -int main(int argc, char *argv[]) -{ - log_init(4); - - ConnectorInventory *inv = inventory_scan(); - printf("inv: %p\n", inv); - - const char *conn_name = argc > 1? argv[1]: "qemu_procfs"; - const char *conn_arg = argc > 2? argv[2]: ""; - - CloneablePhysicalMemoryObj *conn = inventory_create_connector(inv, conn_name, conn_arg); - printf("conn: %p\n", conn); - - if (conn) { - PhysicalMemoryObj *phys_mem = downcast_cloneable(conn); - printf("phys_mem: %p\n", phys_mem); - - uint64_t read = phys_read_u64(phys_mem, addr_to_paddr(0x30000)); - - printf("Read: %lx\n", read); - - phys_free(phys_mem); - - connector_free(conn); - printf("conn freed!\n"); - } - - inventory_free(inv); - printf("inv freed!\n"); - - return 0; -} diff --git a/apex_dma/memflow_lib/memflow-ffi/memflow.h b/apex_dma/memflow_lib/memflow-ffi/memflow.h index 7655946..c64bb94 100644 --- a/apex_dma/memflow_lib/memflow-ffi/memflow.h +++ b/apex_dma/memflow_lib/memflow-ffi/memflow.h @@ -1,10 +1,135 @@ #ifndef MEMFLOW_H #define MEMFLOW_H +// Construct a typed slice for rust functions +#define REF_SLICE(ty, buf, len) ((struct CSliceRef_##ty){(buf), (len)}) + +// Constructs a typed mutable slice for rust functions +#define MUT_SLICE(ty, buf, len) ((struct CSliceMut_##ty){(buf), (len)}) + +// Constructs a slice from a string for rust functions +// Note that strlen() is optimized out for string literals here +#define STR(string) \ + REF_SLICE(u8, (const unsigned char *)string, strlen(string)) + +// Constructs a callback +#define CALLBACK(ty, ctx, func) \ + (struct Callback_c_void__##ty){(ctx), (bool (*)(void *, ty))(func)} + +// Constructs a dynamic collect callback +// +// This callback will collect all elements into a buffer accessible within `(*name_data)`. +// It is the same buffer as `name_base.buf`, but cast into the correct type. The buffer must +// be freed with `free(3)`. +// +// Number of elements is accessible within `name_base.size`, alongside its capacity. 
+// +// After creation, this callback should not exit its scope. +#define COLLECT_CB(ty, name) \ + struct CollectBase name##_base = {}; \ + ty **name##_data = (ty **)&name##_base.buf; \ + Callback_c_void__##ty name = CALLBACK(ty, &name##_base, cb_collect_dynamic_##ty) + +// Constructs a static collect callback +// +// This callback will collect all elements into the provided buffer up to given length. +// +// Any additional elements that do not fit will be skipped. +// +// Number of elements is accessible within `name_base.size`. +// +// After creation, this callback should not exit its scope. +#define COLLECT_CB_INTO(ty, name, data, len) \ + struct CollectBase name##_base = (struct CollectBase){ (void *)data, (size_t)len, 0 }; \ + ty **name##_data = (ty **)&name##_base.buf; \ + Callback_c_void__##ty name = CALLBACK(ty, &name##_base, cb_collect_static_##ty) + +// Constructs a static collect callback (for arrays) +// +// This is the same as `COLLECT_CB_INTO`, but performs an automatic array size calculation. +// +// Number of elements is accessible within `name_base.size`. +// +// After creation, this callback should not exit its scope. +#define COLLECT_CB_INTO_ARR(ty, name, data) \ + COLLECT_CB_INTO(ty, name, data, sizeof(data) / sizeof(*data)) + +// Constructs a count callback +// +// This callback will simply count the number of elements encountered, and this value is +// accessible through `name_count` variable. +// +// After creation, this callback should not exit its scope. 
+#define COUNT_CB(ty, name) \ + size_t name##_count = 0; \ + Callback_c_void__##ty name = CALLBACK(ty, &name##_count, cb_count_##ty) + +#define BUF_ITER_SPEC(ty, ty2, name, buf, len) \ + struct BufferIterator name##_base = (struct BufferIterator){(const void *)(const ty2 *)buf, len, 0, sizeof(ty2)}; \ + CIterator_##ty name = (CIterator_##ty){ &name##_base, (int32_t (*)(void *, ty2 *))buf_iter_next } + +#define BUF_ITER_ARR_SPEC(ty, ty2, name, buf) BUF_ITER_SPEC(ty, ty2, name, buf, sizeof(buf) / sizeof(*buf)) + +#define BUF_ITER(ty, name, buf, len) \ + BUF_ITER_SPEC(ty, ty, name, buf, len) + +#define BUF_ITER_ARR(ty, name, buf) BUF_ITER(ty, name, buf, sizeof(buf) / sizeof(*buf)) + +// Forward declarations for vtables and their wrappers +struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void; +struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void; +struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void; +struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void; +struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void; +struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void; +struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void; +struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void; +struct ConnectorInstance_CBox_c_void_____CArc_c_void; +struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; +struct 
IntoCpuState_CBox_c_void_____CArc_c_void; +struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void; +struct IntoCpuState_CBox_c_void_____CArc_c_void; +struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void; +struct ConnectorInstance_CBox_c_void_____CArc_c_void; +struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct IntoKeyboard_CBox_c_void_____CArc_c_void; +struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void; +struct IntoKeyboard_CBox_c_void_____CArc_c_void; +struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct OsInstance_CBox_c_void_____CArc_c_void; +struct OsInstanceContainer_CBox_c_void_____CArc_c_void; +struct ProcessInstance_CBox_c_void_____CArc_c_void; +struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct ProcessInstance_CBox_c_void_____CArc_c_void; +struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct ProcessInstance_CBox_c_void_____CArc_c_void; +struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct IntoProcessInstance_CBox_c_void_____CArc_c_void; +struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct IntoProcessInstance_CBox_c_void_____CArc_c_void; +struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct IntoProcessInstance_CBox_c_void_____CArc_c_void; +struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct IntoProcessInstance_CBox_c_void_____CArc_c_void; +struct 
IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; +struct ConnectorInstance_CBox_c_void_____CArc_c_void; +struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; + #include #include #include #include +typedef void *Library; /** * Identifies the byte order of a architecture @@ -22,49 +147,178 @@ enum Endianess /** * Little Endianess */ - LittleEndian, + Endianess_LittleEndian, /** * Big Endianess */ - BigEndian, + Endianess_BigEndian, }; #ifndef __cplusplus typedef uint8_t Endianess; #endif // __cplusplus -typedef struct ArchitectureObj ArchitectureObj; - -typedef struct CloneablePhysicalMemoryObj CloneablePhysicalMemoryObj; - /** - * Holds an inventory of available connectors. + * An enum representing the available verbosity levels of the logger. + * + * Typical usage includes: checking if a certain `Level` is enabled with + * [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of + * [`log!`](macro.log.html), and comparing a `Level` directly to a + * [`LevelFilter`](enum.LevelFilter.html). */ -typedef struct ConnectorInventory ConnectorInventory; - -typedef struct OsProcessInfoObj OsProcessInfoObj; - -typedef struct OsProcessModuleInfoObj OsProcessModuleInfoObj; - -typedef struct PhysicalMemoryObj PhysicalMemoryObj; - -typedef struct PhysicalReadData PhysicalReadData; +enum Level +#ifdef __cplusplus + : uintptr_t +#endif // __cplusplus + { + /** + * The "error" level. + * + * Designates very serious errors. + */ + Level_Error = 1, + /** + * The "warn" level. + * + * Designates hazardous situations. + */ + Level_Warn, + /** + * The "info" level. + * + * Designates useful information. + */ + Level_Info, + /** + * The "debug" level. + * + * Designates lower priority information. + */ + Level_Debug, + /** + * The "trace" level. + * + * Designates very low priority, often extremely verbose, information. 
+ */ + Level_Trace, +}; +#ifndef __cplusplus +typedef uintptr_t Level; +#endif // __cplusplus -typedef struct PhysicalWriteData PhysicalWriteData; +/** + * An enum representing the available verbosity level filters of the logger. + * + * A `LevelFilter` may be compared directly to a [`Level`]. Use this type + * to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. + * + * [`Level`]: enum.Level.html + * [`max_level()`]: fn.max_level.html + * [`set_max_level`]: fn.set_max_level.html + */ +enum LevelFilter +#ifdef __cplusplus + : uintptr_t +#endif // __cplusplus + { + /** + * A level lower than all log levels. + */ + LevelFilter_Off, + /** + * Corresponds to the `Error` log level. + */ + LevelFilter_Error, + /** + * Corresponds to the `Warn` log level. + */ + LevelFilter_Warn, + /** + * Corresponds to the `Info` log level. + */ + LevelFilter_Info, + /** + * Corresponds to the `Debug` log level. + */ + LevelFilter_Debug, + /** + * Corresponds to the `Trace` log level. + */ + LevelFilter_Trace, +}; +#ifndef __cplusplus +typedef uintptr_t LevelFilter; +#endif // __cplusplus -typedef struct VirtualMemoryObj VirtualMemoryObj; +typedef struct ArchitectureObj ArchitectureObj; -typedef struct VirtualReadData VirtualReadData; +/** + * The core of the plugin system + * + * It scans system directories and collects valid memflow plugins. They can then be instantiated + * easily. 
The reason the libraries are collected is to allow for reuse, and save performance + * + * # Examples + * + * Creating a OS instance, the recommended way: + * + * ```no_run + * use memflow::plugins::Inventory; + * # use memflow::plugins::OsInstanceArcBox; + * # use memflow::error::Result; + * # fn test() -> Result> { + * let inventory = Inventory::scan(); + * inventory + * .builder() + * .connector("qemu") + * .os("win32") + * .build() + * # } + * # test().ok(); + * ``` + * + * Nesting connectors and os plugins: + * ```no_run + * use memflow::plugins::{Inventory, Args}; + * # use memflow::error::Result; + * # fn test() -> Result<()> { + * let inventory = Inventory::scan(); + * let os = inventory + * .builder() + * .connector("qemu") + * .os("linux") + * .connector("qemu") + * .os("win32") + * .build(); + * # Ok(()) + * # } + * # test().ok(); + * ``` + */ +typedef struct Inventory Inventory; -typedef struct VirtualWriteData VirtualWriteData; +/** + * The largest target memory type + * The following core rule is defined for these memory types: + * + * `PAGE_SIZE < usize <= umem` + * + * Where `PAGE_SIZE` is any lowest granularity page size, `usize` is the standard size type, and + * `umem` is memflow's memory size type. + * + * This means that `usize` can always be safely cast to `umem`, while anything to do with page + * sizes can be cast to `umem` safely, + * + */ +typedef uint64_t umem; /** * This type represents a address on the target system. - * It internally holds a `u64` value but can also be used + * It internally holds a `umem` value but can also be used * when working in 32-bit environments. * * This type will not handle overflow for 32-bit or 64-bit addresses / lengths. */ -typedef uint64_t Address; +typedef umem Address; /** * A address with the value of zero. * @@ -77,6 +331,18 @@ typedef uint64_t Address; * ``` */ #define Address_NULL 0 +/** + * A address with an invalid value. 
+ * + * # Examples + * + * ``` + * use memflow::types::Address; + * + * println!("address: {}", Address::INVALID); + * ``` + */ +#define Address_INVALID ~0 /** * Describes the type of a page using a bitflag. @@ -85,27 +351,27 @@ typedef uint8_t PageType; /** * The page explicitly has no flags. */ -#define PageType_NONE (uint8_t)0 +#define PageType_NONE 0 /** * The page type is not known. */ -#define PageType_UNKNOWN (uint8_t)1 +#define PageType_UNKNOWN 1 /** * The page contains page table entries. */ -#define PageType_PAGE_TABLE (uint8_t)2 +#define PageType_PAGE_TABLE 2 /** * The page is a writeable page. */ -#define PageType_WRITEABLE (uint8_t)4 +#define PageType_WRITEABLE 4 /** * The page is read only. */ -#define PageType_READ_ONLY (uint8_t)8 +#define PageType_READ_ONLY 8 /** * The page is not executable. */ -#define PageType_NOEXEC (uint8_t)16 +#define PageType_NOEXEC 16 /** * This type represents a wrapper over a [address](address/index.html) @@ -123,395 +389,3339 @@ typedef struct PhysicalAddress { PageType page_type; uint8_t page_size_log2; } PhysicalAddress; - -typedef struct PhysicalMemoryMetadata { - uintptr_t size; - bool readonly; -} PhysicalMemoryMetadata; - /** - * Type alias for a PID. + * A physical address with an invalid value. */ -typedef uint32_t PID; - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -extern const struct ArchitectureObj *X86_32; +#define PhysicalAddress_INVALID (PhysicalAddress){ .address = Address_INVALID, .page_type = PageType_UNKNOWN, .page_size_log2 = 0 } -extern const struct ArchitectureObj *X86_32_PAE; +/** + * FFI-safe box + * + * This box has a static self reference, alongside a custom drop function. + * + * The drop function can be called from anywhere, it will free on correct allocator internally. 
+ */ +typedef struct CBox_c_void { + void *instance; + void (*drop_fn)(void*); +} CBox_c_void; -extern const struct ArchitectureObj *X86_64; +/** + * FFI-Safe Arc + * + * This is an FFI-Safe equivalent of Arc and Option>. + */ +typedef struct CArc_c_void { + const void *instance; + const void *(*clone_fn)(const void*); + void (*drop_fn)(const void*); +} CArc_c_void; -void log_init(int32_t level_num); +typedef struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + struct CArc_c_void context; +} ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; /** - * Helper to convert `Address` to a `PhysicalAddress` + * CGlue vtable for trait Clone. * - * This will create a `PhysicalAddress` with `UNKNOWN` PageType. + * This virtual function table contains ABI-safe interface for the given trait. */ -struct PhysicalAddress addr_to_paddr(Address address); +typedef struct CloneVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void { + struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void (*clone)(const struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} CloneVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; /** - * Create a new connector inventory + * Wrapper around mutable slices. * - * This function will try to find connectors using PATH environment variable - * - * Note that all functions go through each directories, and look for a `memflow` directory, - * and search for libraries in those. - * - * # Safety - * - * ConnectorInventory is inherently unsafe, because it loads shared libraries which can not be - * guaranteed to be safe. + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. 
*/ -struct ConnectorInventory *inventory_scan(void); +typedef struct CSliceMut_u8 { + uint8_t *data; + uintptr_t len; +} CSliceMut_u8; /** - * Create a new inventory with custom path string - * - * # Safety - * - * `path` must be a valid null terminated string + * FFI-safe 3 element tuple. */ -struct ConnectorInventory *inventory_scan_path(const char *path); +typedef struct CTup3_PhysicalAddress__Address__CSliceMut_u8 { + struct PhysicalAddress _0; + Address _1; + struct CSliceMut_u8 _2; +} CTup3_PhysicalAddress__Address__CSliceMut_u8; /** - * Add a directory to an existing inventory - * - * # Safety - * - * `dir` must be a valid null terminated string + * MemData type for physical memory reads. */ -int32_t inventory_add_dir(struct ConnectorInventory *inv, const char *dir); +typedef struct CTup3_PhysicalAddress__Address__CSliceMut_u8 PhysicalReadData; /** - * Create a connector with given arguments + * FFI compatible iterator. * - * This creates an instance of a `CloneablePhysicalMemory`. To use it for physical memory - * operations, please call `downcast_cloneable` to create a instance of `PhysicalMemory`. + * Any mutable reference to an iterator can be converted to a `CIterator`. * - * Regardless, this instance needs to be freed using `connector_free`. + * `CIterator` implements `Iterator`. * - * # Arguments + * # Examples * - * * `name` - name of the connector to use - * * `args` - arguments to be passed to the connector upon its creation + * Using [`AsCIterator`](AsCIterator) helper: * - * # Safety + * ``` + * use cglue::iter::{CIterator, AsCIterator}; * - * Both `name`, and `args` must be valid null terminated strings. + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } * - * Any error strings returned by the connector must not be outputed after the connector gets - * freed, because that operation could cause the underlying shared library to get unloaded. 
- */ -struct CloneablePhysicalMemoryObj *inventory_create_connector(struct ConnectorInventory *inv, - const char *name, - const char *args); - -/** - * Clone a connector + * let mut iter = (0..10).map(|v| v * v); * - * This method is useful when needing to perform multithreaded operations, as a connector is not - * guaranteed to be thread safe. Every single cloned instance also needs to be freed using - * `connector_free`. + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` * - * # Safety + * Converting with `Into` trait: * - * `conn` has to point to a a valid `CloneablePhysicalMemory` created by one of the provided - * functions. - */ -struct CloneablePhysicalMemoryObj *connector_clone(const struct CloneablePhysicalMemoryObj *conn); - -/** - * Free a connector instance + * ``` + * use cglue::iter::{CIterator, AsCIterator}; * - * # Safety + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } * - * `conn` has to point to a valid `CloneablePhysicalMemoryObj` created by one of the provided - * functions. + * let mut iter = (0..=10).map(|v| v * v); * - * There has to be no instance of `PhysicalMemory` created from the input `conn`, because they - * will become invalid. + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` */ -void connector_free(struct CloneablePhysicalMemoryObj *conn); +typedef struct CIterator_PhysicalReadData { + void *iter; + int32_t (*func)(void*, PhysicalReadData *out); +} CIterator_PhysicalReadData; /** - * Free a connector inventory - * - * # Safety - * - * `inv` must point to a valid `ConnectorInventory` that was created using one of the provided - * functions. + * FFI-safe 2 element tuple. 
*/ -void inventory_free(struct ConnectorInventory *inv); +typedef struct CTup2_Address__CSliceMut_u8 { + Address _0; + struct CSliceMut_u8 _1; +} CTup2_Address__CSliceMut_u8; + +typedef struct CTup2_Address__CSliceMut_u8 ReadData; + +typedef struct Callback_c_void__ReadData { + void *context; + bool (*func)(void*, ReadData); +} Callback_c_void__ReadData; + +typedef struct Callback_c_void__ReadData OpaqueCallback_ReadData; /** - * Downcast a cloneable physical memory into a physical memory object. - * - * This function will take a `cloneable` and turn it into a `PhysicalMemoryObj`, which then can be - * used by physical memory functions. + * Data needed to perform memory operations. * - * Please note that this does not free `cloneable`, and the reference is still valid for further - * operations. + * `inp` is an iterator containing */ -struct PhysicalMemoryObj *downcast_cloneable(struct CloneablePhysicalMemoryObj *cloneable); +typedef struct MemOps_PhysicalReadData__ReadData { + struct CIterator_PhysicalReadData inp; + OpaqueCallback_ReadData *out; + OpaqueCallback_ReadData *out_fail; +} MemOps_PhysicalReadData__ReadData; + +typedef struct MemOps_PhysicalReadData__ReadData PhysicalReadMemOps; /** - * Free a `PhysicalMemoryObj` + * Wrapper around const slices. * - * This will free a reference to a `PhysicalMemoryObj`. If the physical memory object was created - * using `downcast_cloneable`, this will NOT free the cloneable reference. + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. * - * # Safety + * # Examples * - * `mem` must point to a valid `PhysicalMemoryObj` that was created using one of the provided - * functions. 
- */ -void phys_free(struct PhysicalMemoryObj *mem); - -/** - * Read a list of values + * Simple conversion: + * + * ``` + * use cglue::slice::CSliceRef; * - * This will perform `len` physical memory reads on the provided `data`. Using lists is preferable - * for performance, because then the underlying connectors can batch those operations. + * let arr = [0, 5, 3, 2]; * - * # Safety + * let cslice = CSliceRef::from(&arr[..]); * - * `data` must be a valid array of `PhysicalReadData` with the length of at least `len` + * let slice = cslice.as_slice(); + * + * assert_eq!(&arr, slice); + * ``` */ -int32_t phys_read_raw_list(struct PhysicalMemoryObj *mem, - struct PhysicalReadData *data, - uintptr_t len); +typedef struct CSliceRef_u8 { + const uint8_t *data; + uintptr_t len; +} CSliceRef_u8; /** - * Write a list of values - * - * This will perform `len` physical memory writes on the provided `data`. Using lists is preferable - * for performance, because then the underlying connectors can batch those operations. - * - * # Safety - * - * `data` must be a valid array of `PhysicalWriteData` with the length of at least `len` + * FFI-safe 3 element tuple. */ -int32_t phys_write_raw_list(struct PhysicalMemoryObj *mem, - const struct PhysicalWriteData *data, - uintptr_t len); +typedef struct CTup3_PhysicalAddress__Address__CSliceRef_u8 { + struct PhysicalAddress _0; + Address _1; + struct CSliceRef_u8 _2; +} CTup3_PhysicalAddress__Address__CSliceRef_u8; /** - * Retrieve metadata about the physical memory object + * MemData type for physical memory writes. */ -struct PhysicalMemoryMetadata phys_metadata(const struct PhysicalMemoryObj *mem); +typedef struct CTup3_PhysicalAddress__Address__CSliceRef_u8 PhysicalWriteData; /** - * Read a single value into `out` from a provided `PhysicalAddress` + * FFI compatible iterator. * - * # Safety + * Any mutable reference to an iterator can be converted to a `CIterator`. + * + * `CIterator` implements `Iterator`. 
+ * + * # Examples + * + * Using [`AsCIterator`](AsCIterator) helper: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } * - * `out` must be a valid pointer to a data buffer of at least `len` size. + * let mut iter = (0..10).map(|v| v * v); + * + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` */ -int32_t phys_read_raw_into(struct PhysicalMemoryObj *mem, - struct PhysicalAddress addr, - uint8_t *out, - uintptr_t len); +typedef struct CIterator_PhysicalWriteData { + void *iter; + int32_t (*func)(void*, PhysicalWriteData *out); +} CIterator_PhysicalWriteData; /** - * Read a single 32-bit value from a provided `PhysicalAddress` + * FFI-safe 2 element tuple. */ -uint32_t phys_read_u32(struct PhysicalMemoryObj *mem, struct PhysicalAddress addr); +typedef struct CTup2_Address__CSliceRef_u8 { + Address _0; + struct CSliceRef_u8 _1; +} CTup2_Address__CSliceRef_u8; + +typedef struct CTup2_Address__CSliceRef_u8 WriteData; + +typedef struct Callback_c_void__WriteData { + void *context; + bool (*func)(void*, WriteData); +} Callback_c_void__WriteData; + +typedef struct Callback_c_void__WriteData OpaqueCallback_WriteData; /** - * Read a single 64-bit value from a provided `PhysicalAddress` + * Data needed to perform memory operations. 
+ * + * `inp` is an iterator containing */ -uint64_t phys_read_u64(struct PhysicalMemoryObj *mem, struct PhysicalAddress addr); +typedef struct MemOps_PhysicalWriteData__WriteData { + struct CIterator_PhysicalWriteData inp; + OpaqueCallback_WriteData *out; + OpaqueCallback_WriteData *out_fail; +} MemOps_PhysicalWriteData__WriteData; + +typedef struct MemOps_PhysicalWriteData__WriteData PhysicalWriteMemOps; + +typedef struct PhysicalMemoryMetadata { + Address max_address; + umem real_size; + bool readonly; + uint32_t ideal_batch_size; +} PhysicalMemoryMetadata; + +typedef struct PhysicalMemoryMapping { + Address base; + umem size; + Address real_base; +} PhysicalMemoryMapping; /** - * Write a single value from `input` into a provided `PhysicalAddress` + * Wrapper around const slices. * - * # Safety + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. + * + * # Examples + * + * Simple conversion: + * + * ``` + * use cglue::slice::CSliceRef; + * + * let arr = [0, 5, 3, 2]; + * + * let cslice = CSliceRef::from(&arr[..]); + * + * let slice = cslice.as_slice(); * - * `input` must be a valid pointer to a data buffer of at least `len` size. + * assert_eq!(&arr, slice); + * ``` */ -int32_t phys_write_raw(struct PhysicalMemoryObj *mem, - struct PhysicalAddress addr, - const uint8_t *input, - uintptr_t len); +typedef struct CSliceRef_PhysicalMemoryMapping { + const struct PhysicalMemoryMapping *data; + uintptr_t len; +} CSliceRef_PhysicalMemoryMapping; /** - * Write a single 32-bit value into a provided `PhysicalAddress` + * FFI-safe 3 element tuple. 
*/ -int32_t phys_write_u32(struct PhysicalMemoryObj *mem, struct PhysicalAddress addr, uint32_t val); +typedef struct CTup3_Address__Address__CSliceMut_u8 { + Address _0; + Address _1; + struct CSliceMut_u8 _2; +} CTup3_Address__Address__CSliceMut_u8; /** - * Write a single 64-bit value into a provided `PhysicalAddress` + * MemData type for regular memory reads. */ -int32_t phys_write_u64(struct PhysicalMemoryObj *mem, struct PhysicalAddress addr, uint64_t val); +typedef struct CTup3_Address__Address__CSliceMut_u8 ReadDataRaw; /** - * Free a virtual memory object reference + * FFI compatible iterator. * - * This function frees the reference to a virtual memory object. + * Any mutable reference to an iterator can be converted to a `CIterator`. * - * # Safety + * `CIterator` implements `Iterator`. * - * `mem` must be a valid reference to a virtual memory object. - */ -void virt_free(struct VirtualMemoryObj *mem); - -/** - * Read a list of values + * # Examples * - * This will perform `len` virtual memory reads on the provided `data`. Using lists is preferable - * for performance, because then the underlying connectors can batch those operations, and virtual - * translation function can cut down on read operations. + * Using [`AsCIterator`](AsCIterator) helper: * - * # Safety + * ``` + * use cglue::iter::{CIterator, AsCIterator}; * - * `data` must be a valid array of `VirtualReadData` with the length of at least `len` - */ -int32_t virt_read_raw_list(struct VirtualMemoryObj *mem, - struct VirtualReadData *data, - uintptr_t len); - -/** - * Write a list of values + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } * - * This will perform `len` virtual memory writes on the provided `data`. Using lists is preferable - * for performance, because then the underlying connectors can batch those operations, and virtual - * translation function can cut down on read operations. 
+ * let mut iter = (0..10).map(|v| v * v); * - * # Safety + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; * - * `data` must be a valid array of `VirtualWriteData` with the length of at least `len` + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` */ -int32_t virt_write_raw_list(struct VirtualMemoryObj *mem, - const struct VirtualWriteData *data, - uintptr_t len); +typedef struct CIterator_ReadDataRaw { + void *iter; + int32_t (*func)(void*, ReadDataRaw *out); +} CIterator_ReadDataRaw; /** - * Read a single value into `out` from a provided `Address` - * - * # Safety + * Data needed to perform memory operations. * - * `out` must be a valid pointer to a data buffer of at least `len` size. + * `inp` is an iterator containing */ -int32_t virt_read_raw_into(struct VirtualMemoryObj *mem, Address addr, uint8_t *out, uintptr_t len); +typedef struct MemOps_ReadDataRaw__ReadData { + struct CIterator_ReadDataRaw inp; + OpaqueCallback_ReadData *out; + OpaqueCallback_ReadData *out_fail; +} MemOps_ReadDataRaw__ReadData; + +typedef struct MemOps_ReadDataRaw__ReadData ReadRawMemOps; /** - * Read a single 32-bit value from a provided `Address` + * FFI-safe 3 element tuple. */ -uint32_t virt_read_u32(struct VirtualMemoryObj *mem, Address addr); +typedef struct CTup3_Address__Address__CSliceRef_u8 { + Address _0; + Address _1; + struct CSliceRef_u8 _2; +} CTup3_Address__Address__CSliceRef_u8; /** - * Read a single 64-bit value from a provided `Address` + * MemData type for regular memory writes. */ -uint64_t virt_read_u64(struct VirtualMemoryObj *mem, Address addr); +typedef struct CTup3_Address__Address__CSliceRef_u8 WriteDataRaw; /** - * Write a single value from `input` into a provided `Address` + * FFI compatible iterator. 
* - * # Safety + * Any mutable reference to an iterator can be converted to a `CIterator`. * - * `input` must be a valid pointer to a data buffer of at least `len` size. - */ -int32_t virt_write_raw(struct VirtualMemoryObj *mem, - Address addr, - const uint8_t *input, - uintptr_t len); - -/** - * Write a single 32-bit value into a provided `Address` + * `CIterator` implements `Iterator`. + * + * # Examples + * + * Using [`AsCIterator`](AsCIterator) helper: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..10).map(|v| v * v); + * + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` */ -int32_t virt_write_u32(struct VirtualMemoryObj *mem, Address addr, uint32_t val); +typedef struct CIterator_WriteDataRaw { + void *iter; + int32_t (*func)(void*, WriteDataRaw *out); +} CIterator_WriteDataRaw; /** - * Write a single 64-bit value into a provided `Address` + * Data needed to perform memory operations. 
+ * + * `inp` is an iterator containing */ -int32_t virt_write_u64(struct VirtualMemoryObj *mem, Address addr, uint64_t val); - -uint8_t arch_bits(const struct ArchitectureObj *arch); - -Endianess arch_endianess(const struct ArchitectureObj *arch); - -uintptr_t arch_page_size(const struct ArchitectureObj *arch); +typedef struct MemOps_WriteDataRaw__WriteData { + struct CIterator_WriteDataRaw inp; + OpaqueCallback_WriteData *out; + OpaqueCallback_WriteData *out_fail; +} MemOps_WriteDataRaw__WriteData; -uintptr_t arch_size_addr(const struct ArchitectureObj *arch); +typedef struct MemOps_WriteDataRaw__WriteData WriteRawMemOps; -uint8_t arch_address_space_bits(const struct ArchitectureObj *arch); +typedef struct MemoryViewMetadata { + Address max_address; + umem real_size; + bool readonly; + bool little_endian; + uint8_t arch_bits; +} MemoryViewMetadata; /** - * Free an architecture reference + * FFI compatible iterator. + * + * Any mutable reference to an iterator can be converted to a `CIterator`. + * + * `CIterator` implements `Iterator`. + * + * # Examples + * + * Using [`AsCIterator`](AsCIterator) helper: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..10).map(|v| v * v); + * + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` + */ +typedef struct CIterator_ReadData { + void *iter; + int32_t (*func)(void*, ReadData *out); +} CIterator_ReadData; + +typedef OpaqueCallback_ReadData ReadCallback; + +/** + * Wrapper around mutable slices. + * + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. 
However, not all functionality is present, use the slice conversion functions. + */ +typedef struct CSliceMut_ReadData { + ReadData *data; + uintptr_t len; +} CSliceMut_ReadData; + +/** + * FFI compatible iterator. + * + * Any mutable reference to an iterator can be converted to a `CIterator`. + * + * `CIterator` implements `Iterator`. + * + * # Examples + * + * Using [`AsCIterator`](AsCIterator) helper: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..10).map(|v| v * v); + * + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` + */ +typedef struct CIterator_WriteData { + void *iter; + int32_t (*func)(void*, WriteData *out); +} CIterator_WriteData; + +typedef OpaqueCallback_WriteData WriteCallback; + +/** + * Wrapper around const slices. + * + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. + * + * # Examples + * + * Simple conversion: + * + * ``` + * use cglue::slice::CSliceRef; + * + * let arr = [0, 5, 3, 2]; + * + * let cslice = CSliceRef::from(&arr[..]); + * + * let slice = cslice.as_slice(); + * + * assert_eq!(&arr, slice); + * ``` + */ +typedef struct CSliceRef_WriteData { + const WriteData *data; + uintptr_t len; +} CSliceRef_WriteData; +/** + * Base CGlue trait object for trait MemoryView. 
+ */ +typedef struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void MemoryViewBase_CBox_c_void_____CArc_c_void; +/** + * CtxBoxed CGlue trait object for trait MemoryView with context. + */ +typedef MemoryViewBase_CBox_c_void_____CArc_c_void MemoryViewBaseCtxBox_c_void__CArc_c_void; +/** + * Boxed CGlue trait object for trait MemoryView with a [`CArc`](cglue::arc::CArc) reference counted context. + */ +typedef MemoryViewBaseCtxBox_c_void__CArc_c_void MemoryViewBaseArcBox_c_void__c_void; +/** + * Opaque Boxed CGlue trait object for trait MemoryView with a [`CArc`](cglue::arc::CArc) reference counted context. + */ +typedef MemoryViewBaseArcBox_c_void__c_void MemoryViewArcBox; + +/** + * Simple CGlue trait object container. + * + * This is the simplest form of container, represented by an instance, clone context, and + * temporary return context. + * + * `instance` value usually is either a reference, or a mutable reference, or a `CBox`, which + * contains static reference to the instance, and a dedicated drop function for freeing resources. + * + * `context` is either `PhantomData` representing nothing, or typically a `CArc` that can be + * cloned at will, reference counting some resource, like a `Library` for automatic unloading. + * + * `ret_tmp` is usually `PhantomData` representing nothing, unless the trait has functions that + * return references to associated types, in which case space is reserved for wrapping structures. + */ +typedef struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void { + struct CBox_c_void instance; + CArc_c_void context; +} CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void; +/** + * CGlue vtable for trait CpuState. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void { + void (*pause)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void *cont); + void (*resume)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void *cont); +} CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void; +/** + * Simple CGlue trait object. + * + * This is the simplest form of CGlue object, represented by a container and vtable for a single + * trait. + * + * Container merely is a this pointer with some optional temporary return reference context. + */ +typedef struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void { + const struct CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void *vtbl; + struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void container; +} CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void; + +// Typedef for default container and context type +typedef struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void CpuState; +/** + * Base CGlue trait object for trait CpuState. 
+ */ +typedef struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void CpuStateBase_CBox_c_void_____CArc_c_void; +typedef struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + CArc_c_void context; +} IntoCpuStateContainer_CBox_c_void_____CArc_c_void; +/** + * CGlue vtable for trait Clone. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct CloneVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void { + struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void (*clone)(const struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void *cont); +} CloneVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void; +/** + * CGlue vtable for trait CpuState. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct CpuStateVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void { + void (*pause)(struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void *cont); + void (*resume)(struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void *cont); +} CpuStateVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void; +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + CpuState < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoCpuState` without losing data. 
+ * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct IntoCpuState_CBox_c_void_____CArc_c_void { + const struct CloneVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void *vtbl_clone; + const struct CpuStateVtbl_IntoCpuStateContainer_CBox_c_void_____CArc_c_void *vtbl_cpustate; + struct IntoCpuStateContainer_CBox_c_void_____CArc_c_void container; +} IntoCpuState_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct IntoCpuState_CBox_c_void_____CArc_c_void IntoCpuState; +/** + * CGlue vtable for trait ConnectorCpuState. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct ConnectorCpuStateVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*cpu_state)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont, + CpuStateBase_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_cpu_state)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void cont, + struct IntoCpuState_CBox_c_void_____CArc_c_void *ok_out); +} ConnectorCpuStateVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + PhysicalMemory < > + ConnectorCpuState < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. 
+ * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `ConnectorInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct ConnectorInstance_CBox_c_void_____CArc_c_void { + const struct CloneVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_clone; + const struct PhysicalMemoryVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_physicalmemory; + const struct ConnectorCpuStateVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_connectorcpustate; + struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void container; +} ConnectorInstance_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct ConnectorInstance_CBox_c_void_____CArc_c_void ConnectorInstance; + +typedef struct ConnectorInstance_CBox_c_void_____CArc_c_void ConnectorInstanceBaseCtxBox_c_void__CArc_c_void; + +typedef ConnectorInstanceBaseCtxBox_c_void__CArc_c_void ConnectorInstanceBaseArcBox_c_void__c_void; + +typedef ConnectorInstanceBaseArcBox_c_void__c_void ConnectorInstanceArcBox; + +typedef ConnectorInstanceArcBox MuConnectorInstanceArcBox; + +typedef struct OsInstanceContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + struct CArc_c_void context; +} OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait Clone. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct CloneVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + struct OsInstanceContainer_CBox_c_void_____CArc_c_void (*clone)(const struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} CloneVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +typedef struct Callback_c_void__Address { + void *context; + bool (*func)(void*, Address); +} Callback_c_void__Address; + +typedef struct Callback_c_void__Address OpaqueCallback_Address; + +typedef OpaqueCallback_Address AddressCallback; + +/** + * Type meant for process IDs + * + * If there is a case where Pid can be over 32-bit limit, or negative, please open an issue, we + * would love to see that. + */ +typedef uint32_t Pid; + +/** + * Exit code of a process + */ +typedef int32_t ExitCode; + +/** + * The state of a process + * + * # Remarks + * + * In case the exit code isn't known ProcessState::Unknown is set. + */ +typedef enum ProcessState_Tag { + ProcessState_Unknown, + ProcessState_Alive, + ProcessState_Dead, +} ProcessState_Tag; + +typedef struct ProcessState { + ProcessState_Tag tag; + union { + struct { + ExitCode dead; + }; + }; +} ProcessState; + +/** + * Wrapper around null-terminated C-style strings. + * + * Analog to Rust's `String`, [`ReprCString`] owns the underlying data. + */ +typedef char *ReprCString; + +typedef enum ArchitectureIdent_Tag { + /** + * Unknown architecture. Could be third-party implemented. memflow knows how to work on them, + * but is unable to instantiate them. + */ + ArchitectureIdent_Unknown, + /** + * X86 with specified bitness and address extensions + * + * First argument - `bitness` controls whether it's 32, or 64 bit variant. + * Second argument - `address_extensions` control whether address extensions are + * enabled (PAE on x32, or LA57 on x64). Warning: LA57 is currently unsupported. + */ + ArchitectureIdent_X86, + /** + * Arm 64-bit architecture with specified page size + * + * Valid page sizes are 4kb, 16kb, 64kb. 
Only 4kb is supported at the moment + */ + ArchitectureIdent_AArch64, +} ArchitectureIdent_Tag; + +typedef struct ArchitectureIdent_X86_Body { + uint8_t _0; + bool _1; +} ArchitectureIdent_X86_Body; + +typedef struct ArchitectureIdent { + ArchitectureIdent_Tag tag; + union { + struct { + uintptr_t unknown; + }; + ArchitectureIdent_X86_Body x86; + struct { + uintptr_t a_arch64; + }; + }; +} ArchitectureIdent; + +/** + * Process information structure + * + * This structure implements basic process information. Architectures are provided both of the + * system, and of the process. + */ +typedef struct ProcessInfo { + /** + * The base address of this process. + * + * # Remarks + * + * On Windows this will be the address of the [`_EPROCESS`](https://www.nirsoft.net/kernel_struct/vista/EPROCESS.html) structure. + */ + Address address; + /** + * ID of this process. + */ + Pid pid; + /** + * The current status of the process at the time when this process info was fetched. + * + * # Remarks + * + * This field is highly volatile and can be re-checked with the [`Process::state()`] function. + */ + struct ProcessState state; + /** + * Name of the process. + */ + ReprCString name; + /** + * Path of the process binary + */ + ReprCString path; + /** + * Command line the process was started with. + */ + ReprCString command_line; + /** + * System architecture of the target system. + */ + struct ArchitectureIdent sys_arch; + /** + * Process architecture + * + * # Remarks + * + * Specifically on 64-bit systems this could be different + * to the `sys_arch` in case the process is an emulated 32-bit process. + * + * On windows this technique is called [`WOW64`](https://docs.microsoft.com/en-us/windows/win32/winprog64/wow64-implementation-details). + */ + struct ArchitectureIdent proc_arch; + /** + * Directory Table Base + * + * # Remarks + * + * These fields contain the translation base used to translate virtual memory addresses into physical memory addresses. 
+ * On x86 systems only `dtb1` is set because only one dtb is used. + * On arm systems both `dtb1` and `dtb2` are set to their corresponding values. + */ + Address dtb1; + Address dtb2; +} ProcessInfo; + +typedef struct Callback_c_void__ProcessInfo { + void *context; + bool (*func)(void*, struct ProcessInfo); +} Callback_c_void__ProcessInfo; + +typedef struct Callback_c_void__ProcessInfo OpaqueCallback_ProcessInfo; + +typedef OpaqueCallback_ProcessInfo ProcessInfoCallback; + +/** + * Pair of address and architecture used for callbacks + */ +typedef struct ModuleAddressInfo { + Address address; + struct ArchitectureIdent arch; +} ModuleAddressInfo; + +typedef struct Callback_c_void__ModuleAddressInfo { + void *context; + bool (*func)(void*, struct ModuleAddressInfo); +} Callback_c_void__ModuleAddressInfo; + +typedef struct Callback_c_void__ModuleAddressInfo OpaqueCallback_ModuleAddressInfo; + +typedef OpaqueCallback_ModuleAddressInfo ModuleAddressCallback; + +/** + * Module information structure + */ +typedef struct ModuleInfo { + /** + * Returns the address of the module header. + * + * # Remarks + * + * On Windows this will be the address where the [`PEB`](https://docs.microsoft.com/en-us/windows/win32/api/winternl/ns-winternl-peb) entry is stored. + */ + Address address; + /** + * The base address of the parent process. + * + * # Remarks + * + * This field is analog to the `ProcessInfo::address` field. + */ + Address parent_process; + /** + * The actual base address of this module. + * + * # Remarks + * + * The base address is contained in the virtual address range of the process + * this module belongs to. + */ + Address base; + /** + * Size of the module + */ + umem size; + /** + * Name of the module + */ + ReprCString name; + /** + * Path of the module + */ + ReprCString path; + /** + * Architecture of the module + * + * # Remarks + * + * Emulated processes often have 2 separate lists of modules, one visible to the emulated + * context (e.g. 
all 32-bit modules in a WoW64 process), and the other for all native modules + * needed to support the process emulation. This should be equal to either + * `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch` of the parent process. + */ + struct ArchitectureIdent arch; +} ModuleInfo; + +typedef struct Callback_c_void__ModuleInfo { + void *context; + bool (*func)(void*, struct ModuleInfo); +} Callback_c_void__ModuleInfo; + +typedef struct Callback_c_void__ModuleInfo OpaqueCallback_ModuleInfo; + +typedef OpaqueCallback_ModuleInfo ModuleInfoCallback; + +/** + * Import information structure + */ +typedef struct ImportInfo { + /** + * Name of the import + */ + ReprCString name; + /** + * Offset of this import from the containing modules base address + */ + umem offset; +} ImportInfo; + +typedef struct Callback_c_void__ImportInfo { + void *context; + bool (*func)(void*, struct ImportInfo); +} Callback_c_void__ImportInfo; + +typedef struct Callback_c_void__ImportInfo OpaqueCallback_ImportInfo; + +typedef OpaqueCallback_ImportInfo ImportCallback; + +/** + * Export information structure + */ +typedef struct ExportInfo { + /** + * Name of the export + */ + ReprCString name; + /** + * Offset of this export from the containing modules base address + */ + umem offset; +} ExportInfo; + +typedef struct Callback_c_void__ExportInfo { + void *context; + bool (*func)(void*, struct ExportInfo); +} Callback_c_void__ExportInfo; + +typedef struct Callback_c_void__ExportInfo OpaqueCallback_ExportInfo; + +typedef OpaqueCallback_ExportInfo ExportCallback; + +/** + * Section information structure + */ +typedef struct SectionInfo { + /** + * Name of the section + */ + ReprCString name; + /** + * Virtual address of this section (essentially module_info.base + virtual_address) + */ + Address base; + /** + * Size of this section + */ + umem size; +} SectionInfo; + +typedef struct Callback_c_void__SectionInfo { + void *context; + bool (*func)(void*, struct SectionInfo); +} 
Callback_c_void__SectionInfo; + +typedef struct Callback_c_void__SectionInfo OpaqueCallback_SectionInfo; + +typedef OpaqueCallback_SectionInfo SectionCallback; + +typedef int64_t imem; + +/** + * FFI-safe 3 element tuple. + */ +typedef struct CTup3_Address__umem__PageType { + Address _0; + umem _1; + PageType _2; +} CTup3_Address__umem__PageType; + +typedef struct CTup3_Address__umem__PageType MemoryRange; + +typedef struct Callback_c_void__MemoryRange { + void *context; + bool (*func)(void*, MemoryRange); +} Callback_c_void__MemoryRange; + +typedef struct Callback_c_void__MemoryRange OpaqueCallback_MemoryRange; + +typedef OpaqueCallback_MemoryRange MemoryRangeCallback; + +/** + * FFI-safe 2 element tuple. + */ +typedef struct CTup2_Address__umem { + Address _0; + umem _1; +} CTup2_Address__umem; + +typedef struct CTup2_Address__umem VtopRange; + +/** + * Wrapper around const slices. + * + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. 
+ * + * # Examples + * + * Simple conversion: + * + * ``` + * use cglue::slice::CSliceRef; + * + * let arr = [0, 5, 3, 2]; + * + * let cslice = CSliceRef::from(&arr[..]); + * + * let slice = cslice.as_slice(); + * + * assert_eq!(&arr, slice); + * ``` + */ +typedef struct CSliceRef_VtopRange { + const VtopRange *data; + uintptr_t len; +} CSliceRef_VtopRange; + +/** + * Virtual page range information with physical mappings used for callbacks + */ +typedef struct VirtualTranslation { + Address in_virtual; + umem size; + struct PhysicalAddress out_physical; +} VirtualTranslation; + +typedef struct Callback_c_void__VirtualTranslation { + void *context; + bool (*func)(void*, struct VirtualTranslation); +} Callback_c_void__VirtualTranslation; + +typedef struct Callback_c_void__VirtualTranslation OpaqueCallback_VirtualTranslation; + +typedef OpaqueCallback_VirtualTranslation VirtualTranslationCallback; + +typedef struct VirtualTranslationFail { + Address from; + umem size; +} VirtualTranslationFail; + +typedef struct Callback_c_void__VirtualTranslationFail { + void *context; + bool (*func)(void*, struct VirtualTranslationFail); +} Callback_c_void__VirtualTranslationFail; + +typedef struct Callback_c_void__VirtualTranslationFail OpaqueCallback_VirtualTranslationFail; + +typedef OpaqueCallback_VirtualTranslationFail VirtualTranslationFailCallback; + +/** + * A `Page` holds information about a memory page. + * + * More information about paging can be found [here](https://en.wikipedia.org/wiki/Paging). + */ +typedef struct Page { + /** + * Contains the page type (see above). + */ + PageType page_type; + /** + * Contains the base address of this page. + */ + Address page_base; + /** + * Contains the size of this page. + */ + umem page_size; +} Page; +/** + * A page object that is invalid. + */ +#define Page_INVALID (Page){ .page_type = PageType_UNKNOWN, .page_base = Address_INVALID, .page_size = 0 } + +/** + * FFI-safe Option. 
+ * + * This type is not really meant for general use, but rather as a last-resort conversion for type + * wrapping. + * + * Typical workflow would include temporarily converting into/from COption. + */ +typedef enum COption_Address_Tag { + COption_Address_None_Address, + COption_Address_Some_Address, +} COption_Address_Tag; + +typedef struct COption_Address { + COption_Address_Tag tag; + union { + struct { + Address some; + }; + }; +} COption_Address; + +/** + * Information block about OS + * + * This provides some basic information about the OS in question. `base`, and `size` may be + * omitted in some circumstances (lack of kernel, or privileges). But architecture should always + * be correct. + */ +typedef struct OsInfo { + /** + * Base address of the OS kernel + */ + Address base; + /** + * Size of the OS kernel + */ + umem size; + /** + * System architecture + */ + struct ArchitectureIdent arch; +} OsInfo; + +/** + * CGlue vtable for trait Os. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct OsVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*process_address_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + AddressCallback callback); + int32_t (*process_info_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + ProcessInfoCallback callback); + int32_t (*process_info_by_address)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct ProcessInfo *ok_out); + int32_t (*process_info_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + struct ProcessInfo *ok_out); + int32_t (*process_info_by_pid)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Pid pid, + struct ProcessInfo *ok_out); + int32_t (*process_by_info)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct ProcessInfo info, + struct ProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_process_by_info)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont, + struct ProcessInfo info, + struct IntoProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*process_by_address)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct ProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*process_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + struct ProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*process_by_pid)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Pid pid, + struct ProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_process_by_address)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont, + Address addr, + struct IntoProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_process_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont, + struct CSliceRef_u8 name, + struct 
IntoProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_process_by_pid)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont, + Pid pid, + struct IntoProcessInstance_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*module_address_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + AddressCallback callback); + int32_t (*module_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + ModuleInfoCallback callback); + int32_t (*module_by_address)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct ModuleInfo *ok_out); + int32_t (*module_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + struct ModuleInfo *ok_out); + int32_t (*primary_module_address)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address *ok_out); + int32_t (*primary_module)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct ModuleInfo *ok_out); + int32_t (*module_import_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ImportCallback callback); + int32_t (*module_export_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ExportCallback callback); + int32_t (*module_section_list_callback)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + SectionCallback callback); + int32_t (*module_import_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ImportInfo *ok_out); + int32_t (*module_export_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ExportInfo *ok_out); + int32_t (*module_section_by_name)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const 
struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct SectionInfo *ok_out); + const struct OsInfo *(*info)(const struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} OsVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait MemoryView. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct MemoryViewVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*read_raw_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + ReadRawMemOps data); + int32_t (*write_raw_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + WriteRawMemOps data); + struct MemoryViewMetadata (*metadata)(const struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont); + int32_t (*read_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_ReadData inp, + ReadCallback *out, + ReadCallback *out_fail); + int32_t (*read_raw_list)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceMut_ReadData data); + int32_t (*read_raw_into)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceMut_u8 out); + int32_t (*write_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_WriteData inp, + WriteCallback *out, + WriteCallback *out_fail); + int32_t (*write_raw_list)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_WriteData data); + int32_t (*write_raw)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceRef_u8 data); +} MemoryViewVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * Simple CGlue trait object container. + * + * This is the simplest form of container, represented by an instance, clone context, and + * temporary return context. 
+ * + * `instance` value usually is either a reference, or a mutable reference, or a `CBox`, which + * contains static reference to the instance, and a dedicated drop function for freeing resources. + * + * `context` is either `PhantomData` representing nothing, or typically a `CArc` that can be + * cloned at will, reference counting some resource, like a `Library` for automatic unloading. + * + * `ret_tmp` is usually `PhantomData` representing nothing, unless the trait has functions that + * return references to associated types, in which case space is reserved for wrapping structures. + */ +typedef struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void { + struct CBox_c_void instance; + CArc_c_void context; +} CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void; +/** + * Simple CGlue trait object container. + * + * This is the simplest form of container, represented by an instance, clone context, and + * temporary return context. + * + * `instance` value usually is either a reference, or a mutable reference, or a `CBox`, which + * contains static reference to the instance, and a dedicated drop function for freeing resources. + * + * `context` is either `PhantomData` representing nothing, or typically a `CArc` that can be + * cloned at will, reference counting some resource, like a `Library` for automatic unloading. + * + * `ret_tmp` is usually `PhantomData` representing nothing, unless the trait has functions that + * return references to associated types, in which case space is reserved for wrapping structures. + */ +typedef struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void { + struct CBox_c_void instance; + CArc_c_void context; +} CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void; +/** + * CGlue vtable for trait KeyboardState. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void { + bool (*is_down)(const struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void *cont, + int32_t vk); +} KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void; +/** + * Simple CGlue trait object. + * + * This is the simplest form of CGlue object, represented by a container and vtable for a single + * trait. + * + * Container merely is a this pointer with some optional temporary return reference context. + */ +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void { + const struct KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void *vtbl; + struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void container; +} CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void; + +// Typedef for default container and context type +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void KeyboardState; +/** + * Base CGlue trait object for trait KeyboardState. + */ +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void KeyboardStateBase_CBox_c_void_____CArc_c_void; +/** + * CGlue vtable for trait Keyboard. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void { + bool (*is_down)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void *cont, + int32_t vk); + void (*set_down)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void *cont, + int32_t vk, + bool down); + int32_t (*state)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void *cont, + KeyboardStateBase_CBox_c_void_____CArc_c_void *ok_out); +} KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void; +/** + * Simple CGlue trait object. + * + * This is the simplest form of CGlue object, represented by a container and vtable for a single + * trait. + * + * Container merely is a this pointer with some optional temporary return reference context. + */ +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void { + const struct KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void *vtbl; + struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void container; +} CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void; + +// Typedef for default container and context type +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void Keyboard; +/** + * Base CGlue trait object for trait Keyboard. 
+ */ +typedef struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void KeyboardBase_CBox_c_void_____CArc_c_void; +typedef struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + CArc_c_void context; +} IntoKeyboardContainer_CBox_c_void_____CArc_c_void; +/** + * CGlue vtable for trait Clone. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct CloneVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void { + struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void (*clone)(const struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void *cont); +} CloneVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void; +/** + * CGlue vtable for trait Keyboard. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct KeyboardVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void { + bool (*is_down)(struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void *cont, int32_t vk); + void (*set_down)(struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void *cont, + int32_t vk, + bool down); + int32_t (*state)(struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void *cont, + KeyboardStateBase_CBox_c_void_____CArc_c_void *ok_out); +} KeyboardVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void; +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + Keyboard < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. 
+ * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoKeyboard` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct IntoKeyboard_CBox_c_void_____CArc_c_void { + const struct CloneVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void *vtbl_clone; + const struct KeyboardVtbl_IntoKeyboardContainer_CBox_c_void_____CArc_c_void *vtbl_keyboard; + struct IntoKeyboardContainer_CBox_c_void_____CArc_c_void container; +} IntoKeyboard_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct IntoKeyboard_CBox_c_void_____CArc_c_void IntoKeyboard; +/** + * CGlue vtable for trait OsKeyboard. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct OsKeyboardVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*keyboard)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + KeyboardBase_CBox_c_void_____CArc_c_void *ok_out); + int32_t (*into_keyboard)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont, + struct IntoKeyboard_CBox_c_void_____CArc_c_void *ok_out); +} OsKeyboardVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait PhysicalMemory. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct PhysicalMemoryVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*phys_read_raw_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + PhysicalReadMemOps data); + int32_t (*phys_write_raw_iter)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + PhysicalWriteMemOps data); + struct PhysicalMemoryMetadata (*metadata)(const struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont); + void (*set_mem_map)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_PhysicalMemoryMapping _mem_map); + MemoryViewBase_CBox_c_void_____CArc_c_void (*into_phys_view)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void cont); + MemoryViewBase_CBox_c_void_____CArc_c_void (*phys_view)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} PhysicalMemoryVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait VirtualTranslate. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct VirtualTranslateVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void { + void (*virt_to_phys_list)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_VtopRange addrs, + VirtualTranslationCallback out, + VirtualTranslationFailCallback out_fail); + void (*virt_to_phys_range)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_translation_map_range)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_page_map_range)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + int32_t (*virt_to_phys)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct PhysicalAddress *ok_out); + int32_t (*virt_page_info)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct Page *ok_out); + void (*virt_translation_map)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + VirtualTranslationCallback out); + struct COption_Address (*phys_to_virt)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address phys); + void (*virt_page_map)(struct OsInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + MemoryRangeCallback out); +} VirtualTranslateVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + Os < > + MemoryView < > + OsKeyboard < > + PhysicalMemory < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. 
+ * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `OsInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct OsInstance_CBox_c_void_____CArc_c_void { + const struct CloneVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_clone; + const struct OsVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_os; + const struct MemoryViewVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_memoryview; + const struct OsKeyboardVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_oskeyboard; + const struct PhysicalMemoryVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_physicalmemory; + const struct VirtualTranslateVtbl_OsInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_virtualtranslate; + struct OsInstanceContainer_CBox_c_void_____CArc_c_void container; +} OsInstance_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct OsInstance_CBox_c_void_____CArc_c_void OsInstance; + +typedef struct OsInstance_CBox_c_void_____CArc_c_void OsInstanceBaseCtxBox_c_void__CArc_c_void; + +typedef OsInstanceBaseCtxBox_c_void__CArc_c_void OsInstanceBaseArcBox_c_void__c_void; + +typedef OsInstanceBaseArcBox_c_void__c_void OsInstanceArcBox; + +typedef OsInstanceArcBox MuOsInstanceArcBox; + +typedef struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + struct CArc_c_void context; +} ProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait MemoryView. 
+ * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct MemoryViewVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*read_raw_iter)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + ReadRawMemOps data); + int32_t (*write_raw_iter)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + WriteRawMemOps data); + struct MemoryViewMetadata (*metadata)(const struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + int32_t (*read_iter)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_ReadData inp, + ReadCallback *out, + ReadCallback *out_fail); + int32_t (*read_raw_list)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceMut_ReadData data); + int32_t (*read_raw_into)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceMut_u8 out); + int32_t (*write_iter)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_WriteData inp, + WriteCallback *out, + WriteCallback *out_fail); + int32_t (*write_raw_list)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_WriteData data); + int32_t (*write_raw)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceRef_u8 data); +} MemoryViewVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait Process. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct ProcessVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void { + struct ProcessState (*state)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + int32_t (*set_dtb)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address dtb1, + Address dtb2); + int32_t (*module_address_list_callback)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ArchitectureIdent *target_arch, + ModuleAddressCallback callback); + int32_t (*module_list_callback)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ArchitectureIdent *target_arch, + ModuleInfoCallback callback); + int32_t (*module_by_address)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct ArchitectureIdent architecture, + struct ModuleInfo *ok_out); + int32_t (*module_by_name_arch)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + const struct ArchitectureIdent *architecture, + struct ModuleInfo *ok_out); + int32_t (*module_by_name)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + struct ModuleInfo *ok_out); + int32_t (*primary_module_address)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address *ok_out); + int32_t (*primary_module)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct ModuleInfo *ok_out); + int32_t (*module_import_list_callback)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ImportCallback callback); + int32_t (*module_export_list_callback)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ExportCallback callback); + int32_t (*module_section_list_callback)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + SectionCallback callback); + int32_t 
(*module_import_by_name)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ImportInfo *ok_out); + int32_t (*module_export_by_name)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ExportInfo *ok_out); + int32_t (*module_section_by_name)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct SectionInfo *ok_out); + const struct ProcessInfo *(*info)(const struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + void (*mapped_mem_range)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + void (*mapped_mem)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + MemoryRangeCallback out); +} ProcessVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait VirtualTranslate. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct VirtualTranslateVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void { + void (*virt_to_phys_list)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_VtopRange addrs, + VirtualTranslationCallback out, + VirtualTranslationFailCallback out_fail); + void (*virt_to_phys_range)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_translation_map_range)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_page_map_range)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + int32_t (*virt_to_phys)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct PhysicalAddress *ok_out); + int32_t (*virt_page_info)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct Page *ok_out); + void (*virt_translation_map)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + VirtualTranslationCallback out); + struct COption_Address (*phys_to_virt)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address phys); + void (*virt_page_map)(struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + MemoryRangeCallback out); +} VirtualTranslateVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * Trait group potentially implementing `MemoryView < > + Process < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. 
+ * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `ProcessInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct ProcessInstance_CBox_c_void_____CArc_c_void { + const struct MemoryViewVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_memoryview; + const struct ProcessVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_process; + const struct VirtualTranslateVtbl_ProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_virtualtranslate; + struct ProcessInstanceContainer_CBox_c_void_____CArc_c_void container; +} ProcessInstance_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct ProcessInstance_CBox_c_void_____CArc_c_void ProcessInstance; + +typedef struct ProcessInstance_CBox_c_void_____CArc_c_void ProcessInstanceBaseCtxBox_c_void__CArc_c_void; + +typedef ProcessInstanceBaseCtxBox_c_void__CArc_c_void ProcessInstanceBaseArcBox_c_void__c_void; + +typedef ProcessInstanceBaseArcBox_c_void__c_void ProcessInstanceArcBox; + +typedef struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void { + struct CBox_c_void instance; + struct CArc_c_void context; +} IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait Clone. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct CloneVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void { + struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void (*clone)(const struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} CloneVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait MemoryView. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct MemoryViewVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*read_raw_iter)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + ReadRawMemOps data); + int32_t (*write_raw_iter)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + WriteRawMemOps data); + struct MemoryViewMetadata (*metadata)(const struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + int32_t (*read_iter)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_ReadData inp, + ReadCallback *out, + ReadCallback *out_fail); + int32_t (*read_raw_list)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceMut_ReadData data); + int32_t (*read_raw_into)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceMut_u8 out); + int32_t (*write_iter)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CIterator_WriteData inp, + WriteCallback *out, + WriteCallback *out_fail); + int32_t (*write_raw_list)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_WriteData data); + int32_t (*write_raw)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct CSliceRef_u8 data); +} MemoryViewVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait Process. 
+ * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct ProcessVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void { + struct ProcessState (*state)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + int32_t (*set_dtb)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address dtb1, + Address dtb2); + int32_t (*module_address_list_callback)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ArchitectureIdent *target_arch, + ModuleAddressCallback callback); + int32_t (*module_list_callback)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ArchitectureIdent *target_arch, + ModuleInfoCallback callback); + int32_t (*module_by_address)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct ArchitectureIdent architecture, + struct ModuleInfo *ok_out); + int32_t (*module_by_name_arch)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + const struct ArchitectureIdent *architecture, + struct ModuleInfo *ok_out); + int32_t (*module_by_name)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_u8 name, + struct ModuleInfo *ok_out); + int32_t (*primary_module_address)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address *ok_out); + int32_t (*primary_module)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct ModuleInfo *ok_out); + int32_t (*module_import_list_callback)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ImportCallback callback); + int32_t (*module_export_list_callback)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + ExportCallback callback); + int32_t (*module_section_list_callback)(struct 
IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + SectionCallback callback); + int32_t (*module_import_by_name)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ImportInfo *ok_out); + int32_t (*module_export_by_name)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct ExportInfo *ok_out); + int32_t (*module_section_by_name)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + const struct ModuleInfo *info, + struct CSliceRef_u8 name, + struct SectionInfo *ok_out); + const struct ProcessInfo *(*info)(const struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont); + void (*mapped_mem_range)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + void (*mapped_mem)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + MemoryRangeCallback out); +} ProcessVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * CGlue vtable for trait VirtualTranslate. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct VirtualTranslateVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void { + void (*virt_to_phys_list)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_VtopRange addrs, + VirtualTranslationCallback out, + VirtualTranslationFailCallback out_fail); + void (*virt_to_phys_range)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_translation_map_range)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_page_map_range)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + int32_t (*virt_to_phys)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address address, + struct PhysicalAddress *ok_out); + int32_t (*virt_page_info)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address addr, + struct Page *ok_out); + void (*virt_translation_map)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + VirtualTranslationCallback out); + struct COption_Address (*phys_to_virt)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + Address phys); + void (*virt_page_map)(struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *cont, + imem gap_size, + MemoryRangeCallback out); +} VirtualTranslateVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + MemoryView < > + Process < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. 
+ * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoProcessInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +typedef struct IntoProcessInstance_CBox_c_void_____CArc_c_void { + const struct CloneVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_clone; + const struct MemoryViewVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_memoryview; + const struct ProcessVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_process; + const struct VirtualTranslateVtbl_IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void *vtbl_virtualtranslate; + struct IntoProcessInstanceContainer_CBox_c_void_____CArc_c_void container; +} IntoProcessInstance_CBox_c_void_____CArc_c_void; + +// Typedef for default container and context type +typedef struct IntoProcessInstance_CBox_c_void_____CArc_c_void IntoProcessInstance; + +typedef struct IntoProcessInstance_CBox_c_void_____CArc_c_void IntoProcessInstanceBaseCtxBox_c_void__CArc_c_void; + +typedef IntoProcessInstanceBaseCtxBox_c_void__CArc_c_void IntoProcessInstanceBaseArcBox_c_void__c_void; + +typedef IntoProcessInstanceBaseArcBox_c_void__c_void IntoProcessInstanceArcBox; + +/** + * Simple CGlue trait object container. + * + * This is the simplest form of container, represented by an instance, clone context, and + * temporary return context. 
+ * + * `instance` value usually is either a reference, or a mutable reference, or a `CBox`, which + * contains static reference to the instance, and a dedicated drop function for freeing resources. + * + * `context` is either `PhantomData` representing nothing, or typically a `CArc` that can be + * cloned at will, reference counting some resource, like a `Library` for automatic unloading. + * + * `ret_tmp` is usually `PhantomData` representing nothing, unless the trait has functions that + * return references to associated types, in which case space is reserved for wrapping structures. + */ +typedef struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void { + struct CBox_c_void instance; + struct CArc_c_void context; +} CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void; + +/** + * CGlue vtable for trait MemoryView. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +typedef struct MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void { + int32_t (*read_raw_iter)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + ReadRawMemOps data); + int32_t (*write_raw_iter)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + WriteRawMemOps data); + struct MemoryViewMetadata (*metadata)(const struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont); + int32_t (*read_iter)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + struct CIterator_ReadData inp, + ReadCallback *out, + ReadCallback *out_fail); + int32_t (*read_raw_list)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + struct CSliceMut_ReadData data); + int32_t (*read_raw_into)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + Address 
addr, + struct CSliceMut_u8 out); + int32_t (*write_iter)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + struct CIterator_WriteData inp, + WriteCallback *out, + WriteCallback *out_fail); + int32_t (*write_raw_list)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + struct CSliceRef_WriteData data); + int32_t (*write_raw)(struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *cont, + Address addr, + struct CSliceRef_u8 data); +} MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void; + +/** + * Simple CGlue trait object. + * + * This is the simplest form of CGlue object, represented by a container and vtable for a single + * trait. + * + * Container merely is a this pointer with some optional temporary return reference context. + */ +typedef struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void { + const struct MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void *vtbl; + struct CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void container; +} CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void; + +// Typedef for default container and context type +typedef struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void MemoryView;/** + * CGlue vtable for trait PhysicalMemory. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +typedef struct PhysicalMemoryVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void { + int32_t (*phys_read_raw_iter)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont, + PhysicalReadMemOps data); + int32_t (*phys_write_raw_iter)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont, + PhysicalWriteMemOps data); + struct PhysicalMemoryMetadata (*metadata)(const struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont); + void (*set_mem_map)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont, + struct CSliceRef_PhysicalMemoryMapping _mem_map); + MemoryViewBase_CBox_c_void_____CArc_c_void (*into_phys_view)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void cont); + MemoryViewBase_CBox_c_void_____CArc_c_void (*phys_view)(struct ConnectorInstanceContainer_CBox_c_void_____CArc_c_void *cont); +} PhysicalMemoryVtbl_ConnectorInstanceContainer_CBox_c_void_____CArc_c_void; + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +extern const struct ArchitectureObj *X86_32; + +extern const struct ArchitectureObj *X86_32_PAE; + +extern const struct ArchitectureObj *X86_64; + +/** + * Initialize logging with selected logging level. + */ +void log_init(LevelFilter level_filter); + +/** + * Logs a error message via log::error! * * # Safety * - * `arch` must be a valid heap allocated reference created by one of the API's functions. + * The provided string must be a valid null-terminated char array. */ -void arch_free(struct ArchitectureObj *arch); - -bool is_x86_arch(const struct ArchitectureObj *arch); +void log_error(const char *s); -Address os_process_info_address(const struct OsProcessInfoObj *obj); +/** + * Logs a warning message via log::warn! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_warn(const char *s); -PID os_process_info_pid(const struct OsProcessInfoObj *obj); +/** + * Logs a info message via log::info! 
+ * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_info(const char *s); /** - * Retreive name of the process + * Logs a debug message via log::debug! + * + * # Safety * - * This will copy at most `max_len` characters (including the null terminator) into `out` of the - * name. + * The provided string must be a valid null-terminated char array. + */ +void log_debug(const char *s); + +/** + * Logs a trace message via log::trace! * * # Safety * - * `out` must be a buffer with at least `max_len` size + * The provided string must be a valid null-terminated char array. + */ +void log_trace(const char *s); + +/** + * Logs an error code with custom log level. + */ +void log_errorcode(Level level, int32_t error); + +/** + * Logs an error with debug log level. + */ +void log_debug_errorcode(int32_t error); + +/** + * Sets new maximum log level. + * + * If `inventory` is supplied, the log level is also updated within all plugin instances. However, + * if it is not supplied, plugins will not have their log levels updated, potentially leading to + * lower performance, or less logging than expected. */ -uintptr_t os_process_info_name(const struct OsProcessInfoObj *obj, char *out, uintptr_t max_len); +void log_set_max_level(LevelFilter level_filter, const struct Inventory *inventory); -const struct ArchitectureObj *os_process_info_sys_arch(const struct OsProcessInfoObj *obj); +/** + * Helper to convert `Address` to a `PhysicalAddress` + * + * This will create a `PhysicalAddress` with `UNKNOWN` PageType. + */ +struct PhysicalAddress addr_to_paddr(Address address); -const struct ArchitectureObj *os_process_info_proc_arch(const struct OsProcessInfoObj *obj); +/** + * Create a new connector inventory + * + * This function will try to find connectors using PATH environment variable + * + * Note that all functions go through each directories, and look for a `memflow` directory, + * and search for libraries in those. 
+ * + * # Safety + * + * Inventory is inherently unsafe, because it loads shared libraries which can not be + * guaranteed to be safe. + */ +struct Inventory *inventory_scan(void); /** - * Free a OsProcessInfoObj reference + * Create a new inventory with custom path string * * # Safety * - * `obj` must point to a valid `OsProcessInfoObj`, and was created using one of the API's - * functions. + * `path` must be a valid null terminated string */ -void os_process_info_free(struct OsProcessInfoObj *obj); +struct Inventory *inventory_scan_path(const char *path); -Address os_process_module_address(const struct OsProcessModuleInfoObj *obj); +/** + * Add a directory to an existing inventory + * + * # Safety + * + * `dir` must be a valid null terminated string + */ +int32_t inventory_add_dir(struct Inventory *inv, const char *dir); -Address os_process_module_parent_process(const struct OsProcessModuleInfoObj *obj); +/** + * Create a connector with given arguments + * + * This creates an instance of `ConnectorInstance`. + * + * This instance needs to be dropped using `connector_drop`. + * + * # Arguments + * + * * `name` - name of the connector to use + * * `args` - arguments to be passed to the connector upon its creation + * + * # Safety + * + * Both `name`, and `args` must be valid null terminated strings. + * + * Any error strings returned by the connector must not be outputed after the connector gets + * freed, because that operation could cause the underlying shared library to get unloaded. + */ +int32_t inventory_create_connector(struct Inventory *inv, + const char *name, + const char *args, + MuConnectorInstanceArcBox *out); -Address os_process_module_base(const struct OsProcessModuleInfoObj *obj); +/** + * Create a OS instance with given arguments + * + * This creates an instance of `KernelInstance`. + * + * This instance needs to be freed using `os_drop`. 
+ * + * # Arguments + * + * * `name` - name of the OS to use + * * `args` - arguments to be passed to the connector upon its creation + * * `mem` - a previously initialized connector instance + * * `out` - a valid memory location that will contain the resulting os-instance + * + * # Remarks + * + * The `mem` connector instance is being _moved_ into the os layer. + * This means upon calling `os_drop` it is not unnecessary to call `connector_drop` anymore. + * + * # Safety + * + * Both `name`, and `args` must be valid null terminated strings. + * + * Any error strings returned by the connector must not be outputed after the connector gets + * freed, because that operation could cause the underlying shared library to get unloaded. + */ +int32_t inventory_create_os(struct Inventory *inv, + const char *name, + const char *args, + ConnectorInstanceArcBox *mem, + MuOsInstanceArcBox *out); -uintptr_t os_process_module_size(const struct OsProcessModuleInfoObj *obj); +/** + * Free a os plugin + * + * # Safety + * + * `os` must point to a valid `OsInstance` that was created using one of the provided + * functions. + */ +void os_drop(OsInstanceArcBox *os); /** - * Retreive name of the module + * Clone a connector + * + * This method is useful when needing to perform multithreaded operations, as a connector is not + * guaranteed to be thread safe. Every single cloned instance also needs to be dropped using + * `connector_drop`. + * + * # Safety * - * This will copy at most `max_len` characters (including the null terminator) into `out` of the - * name. + * `conn` has to point to a a valid `CloneablePhysicalMemory` created by one of the provided + * functions. 
+ */ +void connector_clone(const ConnectorInstanceArcBox *conn, MuConnectorInstanceArcBox *out); + +/** + * Free a connector instance * * # Safety * - * `out` must be a buffer with at least `max_len` size + * `conn` has to point to a valid [`ConnectorInstance`](ConnectorInstanceArcBox) created by one of the provided + * functions. + * + * There has to be no instance of `PhysicalMemory` created from the input `conn`, because they + * will become invalid. */ -uintptr_t os_process_module_name(const struct OsProcessModuleInfoObj *obj, - char *out, - uintptr_t max_len); +void connector_drop(ConnectorInstanceArcBox *conn); /** - * Free a OsProcessModuleInfoObj reference + * Free a connector inventory * * # Safety * - * `obj` must point to a valid `OsProcessModuleInfoObj`, and was created using one of the API's + * `inv` must point to a valid `Inventory` that was created using one of the provided * functions. */ -void os_process_module_free(struct OsProcessModuleInfoObj *obj); +void inventory_free(struct Inventory *inv); + +uint8_t arch_bits(const struct ArchitectureObj *arch); + +Endianess arch_endianess(const struct ArchitectureObj *arch); + +uintptr_t arch_page_size(const struct ArchitectureObj *arch); + +uintptr_t arch_size_addr(const struct ArchitectureObj *arch); + +uint8_t arch_address_space_bits(const struct ArchitectureObj *arch); + +/** + * Free an architecture reference + * + * # Safety + * + * `arch` must be a valid heap allocated reference created by one of the API's functions. 
+ */ +void arch_free(struct ArchitectureObj *arch); + +bool is_x86_arch(const struct ArchitectureObj *arch); +static CArc_c_void ctx_arc_clone(CArc_c_void *self) { + CArc_c_void ret = *self; + ret.instance = self->clone_fn(self->instance); + return ret; +} + +void ctx_arc_drop(CArc_c_void *self) { + if (self->drop_fn && self->instance) self->drop_fn(self->instance); +} +void cont_box_drop(CBox_c_void *self) { + if (self->drop_fn && self->instance) self->drop_fn(self->instance); +} + +static inline void mf_pause(void *self) { +(((struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void *)self)->vtbl)->pause(&((struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void *)self)->container); + +} + +static inline void mf_resume(void *self) { +(((struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void *)self)->vtbl)->resume(&((struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void *)self)->container); + +} + +static inline void mf_cpustate_drop(struct CGlueTraitObj_CBox_c_void_____CpuStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____CpuStateRetTmp_CArc_c_void______________CArc_c_void_____CpuStateRetTmp_CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline bool mf_keyboardstate_is_down(const void *self, int32_t vk) { + bool __ret = (((const struct 
CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void *)self)->vtbl)->is_down(&((const struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void *)self)->container, vk); + return __ret; +} + +static inline void mf_keyboardstate_drop(struct CGlueTraitObj_CBox_c_void_____KeyboardStateVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardStateRetTmp_CArc_c_void______________CArc_c_void_____KeyboardStateRetTmp_CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline bool mf_keyboard_is_down(void *self, int32_t vk) { + bool __ret = (((struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->vtbl)->is_down(&((struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->container, vk); + return __ret; +} + +static inline void mf_set_down(void *self, int32_t vk, bool down) { +(((struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->vtbl)->set_down(&((struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->container, vk, down); + +} + +static inline int32_t mf_state(void *self, KeyboardStateBase_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct 
CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->vtbl)->state(&((struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline void mf_keyboard_drop(struct CGlueTraitObj_CBox_c_void_____KeyboardVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____KeyboardRetTmp_CArc_c_void______________CArc_c_void_____KeyboardRetTmp_CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline int32_t mf_read_raw_iter(void *self, ReadRawMemOps data) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->read_raw_iter(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_write_raw_iter(void *self, WriteRawMemOps data) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->write_raw_iter(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, data); + return __ret; +} + +static inline struct MemoryViewMetadata mf_metadata(const void *self) { + struct MemoryViewMetadata __ret = (((const struct 
CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->metadata(&((const struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_read_iter(void *self, struct CIterator_ReadData inp, ReadCallback * out, ReadCallback * out_fail) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->read_iter(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_read_raw_list(void *self, struct CSliceMut_ReadData data) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->read_raw_list(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_read_raw_into(void *self, Address addr, struct CSliceMut_u8 out) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->read_raw_into(&((struct 
CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, addr, out); + return __ret; +} + +static inline int32_t mf_write_iter(void *self, struct CIterator_WriteData inp, WriteCallback * out, WriteCallback * out_fail) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->write_iter(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_write_raw_list(void *self, struct CSliceRef_WriteData data) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->write_raw_list(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_write_raw(void *self, Address addr, struct CSliceRef_u8 data) { + int32_t __ret = (((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->vtbl)->write_raw(&((struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void *)self)->container, addr, data); + return __ret; +} + 
+static inline void mf_memoryview_drop(struct CGlueTraitObj_CBox_c_void_____MemoryViewVtbl_CGlueObjContainer_CBox_c_void_____CArc_c_void_____MemoryViewRetTmp_CArc_c_void______________CArc_c_void_____MemoryViewRetTmp_CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline struct ConnectorInstance_CBox_c_void_____CArc_c_void mf_connectorinstance_clone(const void *self) { + struct ConnectorInstance_CBox_c_void_____CArc_c_void __ret; + __ret.container = (((const struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_clone)->clone(&((const struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_connectorinstance_drop(struct ConnectorInstance_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline struct IntoCpuState_CBox_c_void_____CArc_c_void mf_intocpustate_clone(const void *self) { + struct IntoCpuState_CBox_c_void_____CArc_c_void __ret; + __ret.container = (((const struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->vtbl_clone)->clone(&((const struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_intocpustate_drop(struct IntoCpuState_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline void mf_intocpustate_pause(void *self) { +(((struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->vtbl_cpustate)->pause(&((struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->container); + +} + +static inline void mf_intocpustate_resume(void *self) { +(((struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->vtbl_cpustate)->resume(&((struct IntoCpuState_CBox_c_void_____CArc_c_void *)self)->container); + +} + +static inline int32_t mf_connectorinstance_cpu_state(void *self, 
CpuStateBase_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_connectorcpustate)->cpu_state(&((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_connectorinstance_into_cpu_state(struct ConnectorInstance_CBox_c_void_____CArc_c_void self, struct IntoCpuState_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_connectorcpustate)->into_cpu_state(self.container, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline struct OsInstance_CBox_c_void_____CArc_c_void mf_osinstance_clone(const void *self) { + struct OsInstance_CBox_c_void_____CArc_c_void __ret; + __ret.container = (((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_clone)->clone(&((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_osinstance_drop(struct OsInstance_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline int32_t mf_osinstance_process_address_list_callback(void *self, AddressCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_address_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, callback); + return __ret; +} + +static inline int32_t mf_osinstance_process_info_list_callback(void *self, ProcessInfoCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_info_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, callback); + return __ret; +} + +static inline int32_t mf_osinstance_process_info_by_address(void *self, Address address, struct ProcessInfo * ok_out) { + int32_t __ret = (((struct 
OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_info_by_address(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, address, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_process_info_by_name(void *self, struct CSliceRef_u8 name, struct ProcessInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_info_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, name, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_process_info_by_pid(void *self, Pid pid, struct ProcessInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_info_by_pid(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, pid, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_process_by_info(void *self, struct ProcessInfo info, struct ProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_by_info(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_into_process_by_info(struct OsInstance_CBox_c_void_____CArc_c_void self, struct ProcessInfo info, struct IntoProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_os)->into_process_by_info(self.container, info, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline int32_t mf_osinstance_process_by_address(void *self, Address addr, struct ProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_by_address(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, ok_out); + return __ret; +} + +static inline int32_t 
mf_osinstance_process_by_name(void *self, struct CSliceRef_u8 name, struct ProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, name, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_process_by_pid(void *self, Pid pid, struct ProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->process_by_pid(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, pid, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_into_process_by_address(struct OsInstance_CBox_c_void_____CArc_c_void self, Address addr, struct IntoProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_os)->into_process_by_address(self.container, addr, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline int32_t mf_osinstance_into_process_by_name(struct OsInstance_CBox_c_void_____CArc_c_void self, struct CSliceRef_u8 name, struct IntoProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_os)->into_process_by_name(self.container, name, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline int32_t mf_osinstance_into_process_by_pid(struct OsInstance_CBox_c_void_____CArc_c_void self, Pid pid, struct IntoProcessInstance_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_os)->into_process_by_pid(self.container, pid, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline int32_t mf_osinstance_module_address_list_callback(void *self, AddressCallback callback) { + int32_t __ret = (((struct 
OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_address_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, callback); + return __ret; +} + +static inline int32_t mf_osinstance_module_list_callback(void *self, ModuleInfoCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, callback); + return __ret; +} + +static inline int32_t mf_osinstance_module_by_address(void *self, Address address, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_by_address(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, address, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_module_by_name(void *self, struct CSliceRef_u8 name, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, name, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_primary_module_address(void *self, Address * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->primary_module_address(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_primary_module(void *self, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->primary_module(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_module_import_list_callback(void *self, const struct ModuleInfo * info, ImportCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_os)->module_import_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_osinstance_module_export_list_callback(void *self, const struct ModuleInfo * info, ExportCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_export_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_osinstance_module_section_list_callback(void *self, const struct ModuleInfo * info, SectionCallback callback) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_section_list_callback(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_osinstance_module_import_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ImportInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_import_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_module_export_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ExportInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_export_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_module_section_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct SectionInfo * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->module_section_by_name(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return 
__ret; +} + +static inline const struct OsInfo * mf_osinstance_info(const void *self) { + const struct OsInfo * __ret = (((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_os)->info(&((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_osinstance_read_raw_iter(void *self, ReadRawMemOps data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_osinstance_write_raw_iter(void *self, WriteRawMemOps data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline struct MemoryViewMetadata mf_osinstance_metadata(const void *self) { + struct MemoryViewMetadata __ret = (((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->metadata(&((const struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_osinstance_read_iter(void *self, struct CIterator_ReadData inp, ReadCallback * out, ReadCallback * out_fail) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_osinstance_read_raw_list(void *self, struct CSliceMut_ReadData data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_list(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_osinstance_read_raw_into(void *self, Address addr, struct CSliceMut_u8 out) { + int32_t __ret = (((struct 
OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_into(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, out); + return __ret; +} + +static inline int32_t mf_osinstance_write_iter(void *self, struct CIterator_WriteData inp, WriteCallback * out, WriteCallback * out_fail) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_osinstance_write_raw_list(void *self, struct CSliceRef_WriteData data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_list(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_osinstance_write_raw(void *self, Address addr, struct CSliceRef_u8 data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, data); + return __ret; +} + +static inline struct IntoKeyboard_CBox_c_void_____CArc_c_void mf_intokeyboard_clone(const void *self) { + struct IntoKeyboard_CBox_c_void_____CArc_c_void __ret; + __ret.container = (((const struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->vtbl_clone)->clone(&((const struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_intokeyboard_drop(struct IntoKeyboard_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline bool mf_intokeyboard_is_down(void *self, int32_t vk) { + bool __ret = (((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->vtbl_keyboard)->is_down(&((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->container, vk); + return __ret; +} + +static inline void 
mf_intokeyboard_set_down(void *self, int32_t vk, bool down) { +(((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->vtbl_keyboard)->set_down(&((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->container, vk, down); + +} + +static inline int32_t mf_intokeyboard_state(void *self, KeyboardStateBase_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->vtbl_keyboard)->state(&((struct IntoKeyboard_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_keyboard(void *self, KeyboardBase_CBox_c_void_____CArc_c_void * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_oskeyboard)->keyboard(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_into_keyboard(struct OsInstance_CBox_c_void_____CArc_c_void self, struct IntoKeyboard_CBox_c_void_____CArc_c_void * ok_out) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + int32_t __ret = (self.vtbl_oskeyboard)->into_keyboard(self.container, ok_out); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline int32_t mf_osinstance_phys_read_raw_iter(void *self, PhysicalReadMemOps data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_read_raw_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_osinstance_phys_write_raw_iter(void *self, PhysicalWriteMemOps data) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_write_raw_iter(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline void mf_osinstance_set_mem_map(void *self, struct CSliceRef_PhysicalMemoryMapping _mem_map) { +(((struct OsInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_physicalmemory)->set_mem_map(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, _mem_map); + +} + +static inline MemoryViewBase_CBox_c_void_____CArc_c_void mf_osinstance_into_phys_view(struct OsInstance_CBox_c_void_____CArc_c_void self) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + MemoryViewBase_CBox_c_void_____CArc_c_void __ret = (self.vtbl_physicalmemory)->into_phys_view(self.container); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline MemoryViewBase_CBox_c_void_____CArc_c_void mf_osinstance_phys_view(void *self) { + MemoryViewBase_CBox_c_void_____CArc_c_void __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_view(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_osinstance_virt_to_phys_list(void *self, struct CSliceRef_VtopRange addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) { +(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_list(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, addrs, out, out_fail); + +} + +static inline void mf_osinstance_virt_to_phys_range(void *self, Address start, Address end, VirtualTranslationCallback out) { +(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_range(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_osinstance_virt_translation_map_range(void *self, Address start, Address end, VirtualTranslationCallback out) { +(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_translation_map_range(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_osinstance_virt_page_map_range(void *self, imem gap_size, Address start, Address end, MemoryRangeCallback out) { 
+(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map_range(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, start, end, out); + +} + +static inline int32_t mf_osinstance_virt_to_phys(void *self, Address address, struct PhysicalAddress * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, address, ok_out); + return __ret; +} + +static inline int32_t mf_osinstance_virt_page_info(void *self, Address addr, struct Page * ok_out) { + int32_t __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_info(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, ok_out); + return __ret; +} + +static inline void mf_osinstance_virt_translation_map(void *self, VirtualTranslationCallback out) { +(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_translation_map(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, out); + +} + +static inline struct COption_Address mf_osinstance_phys_to_virt(void *self, Address phys) { + struct COption_Address __ret = (((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->phys_to_virt(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, phys); + return __ret; +} + +static inline void mf_osinstance_virt_page_map(void *self, imem gap_size, MemoryRangeCallback out) { +(((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map(&((struct OsInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, out); + +} + +static inline int32_t mf_processinstance_read_raw_iter(void *self, ReadRawMemOps data) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_iter(&((struct 
ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_processinstance_write_raw_iter(void *self, WriteRawMemOps data) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_iter(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline struct MemoryViewMetadata mf_processinstance_metadata(const void *self) { + struct MemoryViewMetadata __ret = (((const struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->metadata(&((const struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_processinstance_read_iter(void *self, struct CIterator_ReadData inp, ReadCallback * out, ReadCallback * out_fail) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_iter(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_processinstance_read_raw_list(void *self, struct CSliceMut_ReadData data) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_list(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_processinstance_read_raw_into(void *self, Address addr, struct CSliceMut_u8 out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_into(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, out); + return __ret; +} + +static inline int32_t mf_processinstance_write_iter(void *self, struct CIterator_WriteData inp, WriteCallback * out, WriteCallback * out_fail) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_memoryview)->write_iter(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_processinstance_write_raw_list(void *self, struct CSliceRef_WriteData data) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_list(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_processinstance_write_raw(void *self, Address addr, struct CSliceRef_u8 data) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, data); + return __ret; +} + +static inline void mf_processinstance_drop(struct ProcessInstance_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline struct ProcessState mf_processinstance_state(void *self) { + struct ProcessState __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->state(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_processinstance_set_dtb(void *self, Address dtb1, Address dtb2) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->set_dtb(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, dtb1, dtb2); + return __ret; +} + +static inline int32_t mf_processinstance_module_address_list_callback(void *self, const struct ArchitectureIdent * target_arch, ModuleAddressCallback callback) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_address_list_callback(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, target_arch, callback); + return __ret; +} + +static 
inline int32_t mf_processinstance_module_list_callback(void *self, const struct ArchitectureIdent * target_arch, ModuleInfoCallback callback) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_list_callback(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, target_arch, callback); + return __ret; +} + +static inline int32_t mf_processinstance_module_by_address(void *self, Address address, struct ArchitectureIdent architecture, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_by_address(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, address, architecture, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_module_by_name_arch(void *self, struct CSliceRef_u8 name, const struct ArchitectureIdent * architecture, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_by_name_arch(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, name, architecture, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_module_by_name(void *self, struct CSliceRef_u8 name, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_by_name(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, name, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_primary_module_address(void *self, Address * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->primary_module_address(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_primary_module(void *self, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct 
ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->primary_module(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_module_import_list_callback(void *self, const struct ModuleInfo * info, ImportCallback callback) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_import_list_callback(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_processinstance_module_export_list_callback(void *self, const struct ModuleInfo * info, ExportCallback callback) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_export_list_callback(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_processinstance_module_section_list_callback(void *self, const struct ModuleInfo * info, SectionCallback callback) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_section_list_callback(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_processinstance_module_import_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ImportInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_import_by_name(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_module_export_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ExportInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_process)->module_export_by_name(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_module_section_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct SectionInfo * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_section_by_name(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline const struct ProcessInfo * mf_processinstance_info(const void *self) { + const struct ProcessInfo * __ret = (((const struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->info(&((const struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_processinstance_mapped_mem_range(void *self, imem gap_size, Address start, Address end, MemoryRangeCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->mapped_mem_range(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, start, end, out); + +} + +static inline void mf_processinstance_mapped_mem(void *self, imem gap_size, MemoryRangeCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->mapped_mem(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, out); + +} + +static inline void mf_processinstance_virt_to_phys_list(void *self, struct CSliceRef_VtopRange addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_list(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addrs, out, out_fail); + +} + +static inline void mf_processinstance_virt_to_phys_range(void *self, Address start, Address end, 
VirtualTranslationCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_range(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_processinstance_virt_translation_map_range(void *self, Address start, Address end, VirtualTranslationCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_translation_map_range(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_processinstance_virt_page_map_range(void *self, imem gap_size, Address start, Address end, MemoryRangeCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map_range(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, start, end, out); + +} + +static inline int32_t mf_processinstance_virt_to_phys(void *self, Address address, struct PhysicalAddress * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, address, ok_out); + return __ret; +} + +static inline int32_t mf_processinstance_virt_page_info(void *self, Address addr, struct Page * ok_out) { + int32_t __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_info(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, ok_out); + return __ret; +} + +static inline void mf_processinstance_virt_translation_map(void *self, VirtualTranslationCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_translation_map(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, out); + +} + +static inline struct COption_Address 
mf_processinstance_phys_to_virt(void *self, Address phys) { + struct COption_Address __ret = (((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->phys_to_virt(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, phys); + return __ret; +} + +static inline void mf_processinstance_virt_page_map(void *self, imem gap_size, MemoryRangeCallback out) { +(((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map(&((struct ProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, out); + +} + +static inline struct IntoProcessInstance_CBox_c_void_____CArc_c_void mf_intoprocessinstance_clone(const void *self) { + struct IntoProcessInstance_CBox_c_void_____CArc_c_void __ret; + __ret.container = (((const struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_clone)->clone(&((const struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_intoprocessinstance_drop(struct IntoProcessInstance_CBox_c_void_____CArc_c_void self) { + cont_box_drop(&self.container.instance); + ctx_arc_drop(&self.container.context); + +} + +static inline int32_t mf_intoprocessinstance_read_raw_iter(void *self, ReadRawMemOps data) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_iter(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_write_raw_iter(void *self, WriteRawMemOps data) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_iter(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline struct MemoryViewMetadata mf_intoprocessinstance_metadata(const void *self) { + struct MemoryViewMetadata __ret = (((const struct 
IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->metadata(&((const struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_read_iter(void *self, struct CIterator_ReadData inp, ReadCallback * out, ReadCallback * out_fail) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_iter(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_read_raw_list(void *self, struct CSliceMut_ReadData data) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_list(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_read_raw_into(void *self, Address addr, struct CSliceMut_u8 out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->read_raw_into(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_write_iter(void *self, struct CIterator_WriteData inp, WriteCallback * out, WriteCallback * out_fail) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_iter(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, inp, out, out_fail); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_write_raw_list(void *self, struct CSliceRef_WriteData data) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw_list(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t 
mf_intoprocessinstance_write_raw(void *self, Address addr, struct CSliceRef_u8 data) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_memoryview)->write_raw(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, data); + return __ret; +} + +static inline struct ProcessState mf_intoprocessinstance_state(void *self) { + struct ProcessState __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->state(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_set_dtb(void *self, Address dtb1, Address dtb2) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->set_dtb(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, dtb1, dtb2); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_address_list_callback(void *self, const struct ArchitectureIdent * target_arch, ModuleAddressCallback callback) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_address_list_callback(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, target_arch, callback); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_list_callback(void *self, const struct ArchitectureIdent * target_arch, ModuleInfoCallback callback) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_list_callback(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, target_arch, callback); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_by_address(void *self, Address address, struct ArchitectureIdent architecture, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_process)->module_by_address(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, address, architecture, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_by_name_arch(void *self, struct CSliceRef_u8 name, const struct ArchitectureIdent * architecture, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_by_name_arch(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, name, architecture, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_by_name(void *self, struct CSliceRef_u8 name, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_by_name(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, name, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_primary_module_address(void *self, Address * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->primary_module_address(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_primary_module(void *self, struct ModuleInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->primary_module(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_import_list_callback(void *self, const struct ModuleInfo * info, ImportCallback callback) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_import_list_callback(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return 
__ret; +} + +static inline int32_t mf_intoprocessinstance_module_export_list_callback(void *self, const struct ModuleInfo * info, ExportCallback callback) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_export_list_callback(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_section_list_callback(void *self, const struct ModuleInfo * info, SectionCallback callback) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_section_list_callback(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, callback); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_import_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ImportInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_import_by_name(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_export_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct ExportInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_export_by_name(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_module_section_by_name(void *self, const struct ModuleInfo * info, struct CSliceRef_u8 name, struct SectionInfo * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->module_section_by_name(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, info, name, 
ok_out); + return __ret; +} + +static inline const struct ProcessInfo * mf_intoprocessinstance_info(const void *self) { + const struct ProcessInfo * __ret = (((const struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->info(&((const struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_intoprocessinstance_mapped_mem_range(void *self, imem gap_size, Address start, Address end, MemoryRangeCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->mapped_mem_range(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, start, end, out); + +} + +static inline void mf_intoprocessinstance_mapped_mem(void *self, imem gap_size, MemoryRangeCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_process)->mapped_mem(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, out); + +} + +static inline void mf_intoprocessinstance_virt_to_phys_list(void *self, struct CSliceRef_VtopRange addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_list(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addrs, out, out_fail); + +} + +static inline void mf_intoprocessinstance_virt_to_phys_range(void *self, Address start, Address end, VirtualTranslationCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys_range(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_intoprocessinstance_virt_translation_map_range(void *self, Address start, Address end, VirtualTranslationCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void 
*)self)->vtbl_virtualtranslate)->virt_translation_map_range(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, start, end, out); + +} + +static inline void mf_intoprocessinstance_virt_page_map_range(void *self, imem gap_size, Address start, Address end, MemoryRangeCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map_range(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, start, end, out); + +} + +static inline int32_t mf_intoprocessinstance_virt_to_phys(void *self, Address address, struct PhysicalAddress * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_to_phys(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, address, ok_out); + return __ret; +} + +static inline int32_t mf_intoprocessinstance_virt_page_info(void *self, Address addr, struct Page * ok_out) { + int32_t __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_info(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, addr, ok_out); + return __ret; +} + +static inline void mf_intoprocessinstance_virt_translation_map(void *self, VirtualTranslationCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_translation_map(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, out); + +} + +static inline struct COption_Address mf_intoprocessinstance_phys_to_virt(void *self, Address phys) { + struct COption_Address __ret = (((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->phys_to_virt(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, phys); + return __ret; +} + +static inline void mf_intoprocessinstance_virt_page_map(void *self, imem gap_size, 
MemoryRangeCallback out) { +(((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_virtualtranslate)->virt_page_map(&((struct IntoProcessInstance_CBox_c_void_____CArc_c_void *)self)->container, gap_size, out); + +} + +static inline int32_t mf_connectorinstance_phys_read_raw_iter(void *self, PhysicalReadMemOps data) { + int32_t __ret = (((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_read_raw_iter(&((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline int32_t mf_connectorinstance_phys_write_raw_iter(void *self, PhysicalWriteMemOps data) { + int32_t __ret = (((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_write_raw_iter(&((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container, data); + return __ret; +} + +static inline struct PhysicalMemoryMetadata mf_connectorinstance_metadata(const void *self) { + struct PhysicalMemoryMetadata __ret = (((const struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->metadata(&((const struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +static inline void mf_connectorinstance_set_mem_map(void *self, struct CSliceRef_PhysicalMemoryMapping _mem_map) { +(((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->set_mem_map(&((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container, _mem_map); + +} + +static inline MemoryViewBase_CBox_c_void_____CArc_c_void mf_connectorinstance_into_phys_view(struct ConnectorInstance_CBox_c_void_____CArc_c_void self) { + CArc_c_void ___ctx = ctx_arc_clone(&self.container.context); + MemoryViewBase_CBox_c_void_____CArc_c_void __ret = (self.vtbl_physicalmemory)->into_phys_view(self.container); + ctx_arc_drop(&___ctx); + return __ret; +} + +static inline MemoryViewBase_CBox_c_void_____CArc_c_void 
mf_connectorinstance_phys_view(void *self) { + MemoryViewBase_CBox_c_void_____CArc_c_void __ret = (((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->vtbl_physicalmemory)->phys_view(&((struct ConnectorInstance_CBox_c_void_____CArc_c_void *)self)->container); + return __ret; +} + +struct CollectBase { + /* Pointer to array of data */ + char *buf; + /* Capacity of the buffer (in elements) */ + size_t capacity; + /* Current size of the buffer (in elements) */ + size_t size; +}; + +// For memcpy +#include + +static bool cb_collect_static_base(struct CollectBase *ctx, size_t elem_size, void *info) { + + if (ctx->size < ctx->capacity) { + memcpy(ctx->buf + elem_size * ctx->size++, info, elem_size); + } + + return ctx->size < ctx->capacity; +} + +static bool cb_collect_dynamic_base(struct CollectBase *ctx, size_t elem_size, void *info) { + + if (!ctx->buf || ctx->size >= ctx->capacity) { + size_t new_capacity = ctx->buf ? ctx->capacity * 2 : 64; + char *buf = (char *)realloc(ctx->buf, elem_size * new_capacity); + if (buf) { + ctx->buf = buf; + ctx->capacity = new_capacity; + } + } + + if (!ctx->buf || ctx->size >= ctx->capacity) return false; + + memcpy(ctx->buf + elem_size * ctx->size++, info, elem_size); + + return true; +} + +struct BufferIterator { + /* Pointer to the data buffer */ + const char *buf; + /* Number of elements in the buffer */ + size_t size; + /* Current element index */ + size_t i; + /* Size of the data element */ + size_t sz_elem; +}; + +static bool buf_iter_next(struct BufferIterator *iter, void *out) { + if (iter->i >= iter->size) return 1; + memcpy(out, iter->buf + iter->i++ * iter->sz_elem, iter->sz_elem); + return 0; +} + +static inline bool cb_collect_static_ReadData(struct CollectBase *ctx, ReadData info) { + return cb_collect_static_base(ctx, sizeof(ReadData), &info); +} + +static inline bool cb_collect_dynamic_ReadData(struct CollectBase *ctx, ReadData info) { + return cb_collect_dynamic_base(ctx, sizeof(ReadData), &info); +} + 
+static inline bool cb_count_ReadData(size_t *cnt, ReadData info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_WriteData(struct CollectBase *ctx, WriteData info) { + return cb_collect_static_base(ctx, sizeof(WriteData), &info); +} + +static inline bool cb_collect_dynamic_WriteData(struct CollectBase *ctx, WriteData info) { + return cb_collect_dynamic_base(ctx, sizeof(WriteData), &info); +} + +static inline bool cb_count_WriteData(size_t *cnt, WriteData info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_Address(struct CollectBase *ctx, Address info) { + return cb_collect_static_base(ctx, sizeof(Address), &info); +} + +static inline bool cb_collect_dynamic_Address(struct CollectBase *ctx, Address info) { + return cb_collect_dynamic_base(ctx, sizeof(Address), &info); +} + +static inline bool cb_count_Address(size_t *cnt, Address info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_ProcessInfo(struct CollectBase *ctx, ProcessInfo info) { + return cb_collect_static_base(ctx, sizeof(ProcessInfo), &info); +} + +static inline bool cb_collect_dynamic_ProcessInfo(struct CollectBase *ctx, ProcessInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(ProcessInfo), &info); +} + +static inline bool cb_count_ProcessInfo(size_t *cnt, ProcessInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_ModuleAddressInfo(struct CollectBase *ctx, ModuleAddressInfo info) { + return cb_collect_static_base(ctx, sizeof(ModuleAddressInfo), &info); +} + +static inline bool cb_collect_dynamic_ModuleAddressInfo(struct CollectBase *ctx, ModuleAddressInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(ModuleAddressInfo), &info); +} + +static inline bool cb_count_ModuleAddressInfo(size_t *cnt, ModuleAddressInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_ModuleInfo(struct CollectBase *ctx, ModuleInfo info) { + return cb_collect_static_base(ctx, sizeof(ModuleInfo), &info); +} + +static inline 
bool cb_collect_dynamic_ModuleInfo(struct CollectBase *ctx, ModuleInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(ModuleInfo), &info); +} + +static inline bool cb_count_ModuleInfo(size_t *cnt, ModuleInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_ImportInfo(struct CollectBase *ctx, ImportInfo info) { + return cb_collect_static_base(ctx, sizeof(ImportInfo), &info); +} + +static inline bool cb_collect_dynamic_ImportInfo(struct CollectBase *ctx, ImportInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(ImportInfo), &info); +} + +static inline bool cb_count_ImportInfo(size_t *cnt, ImportInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_ExportInfo(struct CollectBase *ctx, ExportInfo info) { + return cb_collect_static_base(ctx, sizeof(ExportInfo), &info); +} + +static inline bool cb_collect_dynamic_ExportInfo(struct CollectBase *ctx, ExportInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(ExportInfo), &info); +} + +static inline bool cb_count_ExportInfo(size_t *cnt, ExportInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_SectionInfo(struct CollectBase *ctx, SectionInfo info) { + return cb_collect_static_base(ctx, sizeof(SectionInfo), &info); +} + +static inline bool cb_collect_dynamic_SectionInfo(struct CollectBase *ctx, SectionInfo info) { + return cb_collect_dynamic_base(ctx, sizeof(SectionInfo), &info); +} + +static inline bool cb_count_SectionInfo(size_t *cnt, SectionInfo info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_MemoryRange(struct CollectBase *ctx, MemoryRange info) { + return cb_collect_static_base(ctx, sizeof(MemoryRange), &info); +} + +static inline bool cb_collect_dynamic_MemoryRange(struct CollectBase *ctx, MemoryRange info) { + return cb_collect_dynamic_base(ctx, sizeof(MemoryRange), &info); +} + +static inline bool cb_count_MemoryRange(size_t *cnt, MemoryRange info) { + return ++(*cnt); +} + +static inline bool 
cb_collect_static_VirtualTranslation(struct CollectBase *ctx, VirtualTranslation info) { + return cb_collect_static_base(ctx, sizeof(VirtualTranslation), &info); +} + +static inline bool cb_collect_dynamic_VirtualTranslation(struct CollectBase *ctx, VirtualTranslation info) { + return cb_collect_dynamic_base(ctx, sizeof(VirtualTranslation), &info); +} + +static inline bool cb_count_VirtualTranslation(size_t *cnt, VirtualTranslation info) { + return ++(*cnt); +} + +static inline bool cb_collect_static_VirtualTranslationFail(struct CollectBase *ctx, VirtualTranslationFail info) { + return cb_collect_static_base(ctx, sizeof(VirtualTranslationFail), &info); +} + +static inline bool cb_collect_dynamic_VirtualTranslationFail(struct CollectBase *ctx, VirtualTranslationFail info) { + return cb_collect_dynamic_base(ctx, sizeof(VirtualTranslationFail), &info); +} + +static inline bool cb_count_VirtualTranslationFail(size_t *cnt, VirtualTranslationFail info) { + return ++(*cnt); +} + #ifdef __cplusplus } // extern "C" diff --git a/apex_dma/memflow_lib/memflow-ffi/memflow.hpp b/apex_dma/memflow_lib/memflow-ffi/memflow.hpp new file mode 100644 index 0000000..a3e2cea --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/memflow.hpp @@ -0,0 +1,3808 @@ +#ifndef MEMFLOW_H +#define MEMFLOW_H + +#include +#include +#include +#include +#include +#include +typedef void *Library; + +/** + * Identifies the byte order of a architecture + * + * This enum is used when reading/writing to/from the memory of a target system. + * The memory will be automatically converted to the endianess memflow is currently running on. + * + * See the [wikipedia article](https://en.wikipedia.org/wiki/Endianness) for more information on the subject. + */ +enum class Endianess : uint8_t { + /** + * Little Endianess + */ + Endianess_LittleEndian, + /** + * Big Endianess + */ + Endianess_BigEndian, +}; + +/** + * An enum representing the available verbosity levels of the logger. 
+ * + * Typical usage includes: checking if a certain `Level` is enabled with + * [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of + * [`log!`](macro.log.html), and comparing a `Level` directly to a + * [`LevelFilter`](enum.LevelFilter.html). + */ +enum class Level : uintptr_t { + /** + * The "error" level. + * + * Designates very serious errors. + */ + Level_Error = 1, + /** + * The "warn" level. + * + * Designates hazardous situations. + */ + Level_Warn, + /** + * The "info" level. + * + * Designates useful information. + */ + Level_Info, + /** + * The "debug" level. + * + * Designates lower priority information. + */ + Level_Debug, + /** + * The "trace" level. + * + * Designates very low priority, often extremely verbose, information. + */ + Level_Trace, +}; + +/** + * An enum representing the available verbosity level filters of the logger. + * + * A `LevelFilter` may be compared directly to a [`Level`]. Use this type + * to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. + * + * [`Level`]: enum.Level.html + * [`max_level()`]: fn.max_level.html + * [`set_max_level`]: fn.set_max_level.html + */ +enum class LevelFilter : uintptr_t { + /** + * A level lower than all log levels. + */ + LevelFilter_Off, + /** + * Corresponds to the `Error` log level. + */ + LevelFilter_Error, + /** + * Corresponds to the `Warn` log level. + */ + LevelFilter_Warn, + /** + * Corresponds to the `Info` log level. + */ + LevelFilter_Info, + /** + * Corresponds to the `Debug` log level. + */ + LevelFilter_Debug, + /** + * Corresponds to the `Trace` log level. + */ + LevelFilter_Trace, +}; + +struct ArchitectureObj; + + +/** Destruct the object. */ +template +inline typename std::enable_if::value>::type mem_drop(T &&self) noexcept { + std::move(self).drop(); +} + +template +inline typename std::enable_if::value>::type mem_drop(T &&self) noexcept {} + +/** Forget the object's resources (null them out). 
*/ +template +inline typename std::enable_if::value>::type mem_forget(T &self) noexcept { + self.forget(); +} + +template +inline typename std::enable_if::value>::type mem_forget(T &self) noexcept {} + +/** Defer mem_forget call when object goes out of scope. */ +template +struct DeferedForget { + T &val; + + DeferedForget(T &val) : val(val) {} + + ~DeferedForget() { + mem_forget(val); + } +}; + +/** Workaround for void types in generic functions. */ +struct StoreAll { + constexpr bool operator[](StoreAll) const { + return false; + } + + template + constexpr T && operator[](T &&t) const { + return std::forward(t); + } + + template + friend T && operator,(T &&t, StoreAll) { + return std::forward(t); + } +}; + +template +using CloneRetTmp = void; + +template +using ConnectorCpuStateRetTmp = void; + +template +using CpuStateRetTmp = void; + +/** + * The core of the plugin system + * + * It scans system directories and collects valid memflow plugins. They can then be instantiated + * easily. 
The reason the libraries are collected is to allow for reuse, and save performance + * + * # Examples + * + * Creating a OS instance, the recommended way: + * + * ```no_run + * use memflow::plugins::Inventory; + * # use memflow::plugins::OsInstanceArcBox; + * # use memflow::error::Result; + * # fn test() -> Result> { + * let inventory = Inventory::scan(); + * inventory + * .builder() + * .connector("qemu") + * .os("win32") + * .build() + * # } + * # test().ok(); + * ``` + * + * Nesting connectors and os plugins: + * ```no_run + * use memflow::plugins::{Inventory, Args}; + * # use memflow::error::Result; + * # fn test() -> Result<()> { + * let inventory = Inventory::scan(); + * let os = inventory + * .builder() + * .connector("qemu") + * .os("linux") + * .connector("qemu") + * .os("win32") + * .build(); + * # Ok(()) + * # } + * # test().ok(); + * ``` + */ +struct Inventory; + +template +using KeyboardRetTmp = void; + +template +using KeyboardStateRetTmp = void; + +template +struct alignas(alignof(T)) RustMaybeUninit { + char pad[sizeof(T)]; + inline T &assume_init() { + return *(T *)this; + } + constexpr const T &assume_init() const { + return *(const T *)this; + } +}; + +template +using MemoryViewRetTmp = void; + +template +using OsKeyboardRetTmp = void; + +template +using OsRetTmp = void; + +template +using PhysicalMemoryRetTmp = void; + +template +using ProcessRetTmp = void; + +template +using VirtualTranslateRetTmp = void; + +/** + * The largest target memory type + * The following core rule is defined for these memory types: + * + * `PAGE_SIZE < usize <= umem` + * + * Where `PAGE_SIZE` is any lowest granularity page size, `usize` is the standard size type, and + * `umem` is memflow's memory size type. + * + * This means that `usize` can always be safely cast to `umem`, while anything to do with page + * sizes can be cast to `umem` safely, + * + */ +using umem = uint64_t; + +/** + * This type represents a address on the target system. 
+ * It internally holds a `umem` value but can also be used + * when working in 32-bit environments. + * + * This type will not handle overflow for 32-bit or 64-bit addresses / lengths. + */ +using Address = umem; +/** + * A address with the value of zero. + * + * # Examples + * + * ``` + * use memflow::types::Address; + * + * println!("address: {}", Address::NULL); + * ``` + */ +constexpr static const Address Address_NULL = 0; +/** + * A address with an invalid value. + * + * # Examples + * + * ``` + * use memflow::types::Address; + * + * println!("address: {}", Address::INVALID); + * ``` + */ +constexpr static const Address Address_INVALID = ~0; + +/** + * Describes the type of a page using a bitflag. + */ +using PageType = uint8_t; +/** + * The page explicitly has no flags. + */ +constexpr static const PageType PageType_NONE = 0; +/** + * The page type is not known. + */ +constexpr static const PageType PageType_UNKNOWN = 1; +/** + * The page contains page table entries. + */ +constexpr static const PageType PageType_PAGE_TABLE = 2; +/** + * The page is a writeable page. + */ +constexpr static const PageType PageType_WRITEABLE = 4; +/** + * The page is read only. + */ +constexpr static const PageType PageType_READ_ONLY = 8; +/** + * The page is not executable. + */ +constexpr static const PageType PageType_NOEXEC = 16; + +/** + * This type represents a wrapper over a [address](address/index.html) + * with additional information about the containing page in the physical memory domain. + * + * This type will mostly be used by the [virtual to physical address translation](todo.html). + * When a physical address is translated from a virtual address the additional information + * about the allocated page the virtual address points to can be obtained from this structure. + * + * Most architectures have support multiple page sizes (see [huge pages](todo.html)) + * which will be represented by the containing `page` of the `PhysicalAddress` struct. 
+ */ +struct PhysicalAddress { + Address address; + PageType page_type; + uint8_t page_size_log2; +}; +/** + * A physical address with an invalid value. + */ +constexpr static const PhysicalAddress PhysicalAddress_INVALID = PhysicalAddress{ /* .address = */ Address_INVALID, /* .page_type = */ PageType_UNKNOWN, /* .page_size_log2 = */ 0 }; + +/** + * FFI-Safe Arc + * + * This is an FFI-Safe equivalent of Arc and Option>. + */ +template +struct CArc { + const T *instance; + const T *(*clone_fn)(const T*); + void (*drop_fn)(const T*); + + inline CArc clone() const noexcept { + CArc ret; + ret.instance = clone_fn(instance); + ret.clone_fn = clone_fn; + ret.drop_fn = drop_fn; + return ret; + } + + inline void drop() && noexcept { + if (drop_fn) + drop_fn(instance); + forget(); + } + + inline void forget() noexcept { + instance = nullptr; + clone_fn = nullptr; + drop_fn = nullptr; + } +}; + +/** + * FFI-safe box + * + * This box has a static self reference, alongside a custom drop function. + * + * The drop function can be called from anywhere, it will free on correct allocator internally. 
+ */ +template +struct CBox { + T *instance; + void (*drop_fn)(T*); + + CBox() = default; + CBox(T *instance) : instance(instance), drop_fn(nullptr) {} + CBox(T *instance, void (*drop_fn)(T *)) : instance(instance), drop_fn(drop_fn) {} + template::value>::type, class = typename std::enable_if::value>::type> + CBox(U &&instance) : instance(new U(instance)), drop_fn(&CBox::delete_fn) {} + + static void delete_fn(T *v) { + delete v; + } + + inline operator CBox () const { + CBox ret; + ret.instance = (void*)instance; + ret.drop_fn = (void(*)(void *))drop_fn; + return ret; + } + + static inline CBox new_box() { + CBox ret; + ret.instance = new T; + ret.drop_fn = &CBox::delete_fn; + return ret; + } + + inline void drop() && noexcept { + if (drop_fn && instance) + drop_fn(instance); + forget(); + } + + inline void forget() noexcept { + instance = nullptr; + drop_fn = nullptr; + } + + inline T *operator->() { + return instance; + } + + inline const T *operator->() const { + return instance; + } +}; + +template, typename CGlueCtx = CArc> +struct ConnectorInstanceContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct ConnectorInstanceContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * CGlue vtable for trait Clone. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +template +struct CloneVtbl { + typedef typename CGlueC::Context Context; + CGlueC (*clone)(const CGlueC *cont); +}; + +template +struct CloneVtblImpl : CloneVtbl { +constexpr CloneVtblImpl() : + CloneVtbl { + &Impl::clone + } {} +}; + +/** + * Wrapper around mutable slices. + * + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. + */ +template +struct CSliceMut { + T *data; + uintptr_t len; + + CSliceMut () = default; + + template::value + && std::is_same::value + >::type> + CSliceMut (Cont &data) : data(data.data()), len(data.size()) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceMut (char *value) : data((T *)value), len(strlen(value)) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceMut (char *value, uintptr_t len) : data((T *)value), len(len) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceMut (std::string &value) : data((T *)value.data()), len(value.length()) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + inline operator std::string() const { + return std::string((char *)data, len); + } +}; + +/** + * FFI-safe 3 element tuple. + */ +template +struct CTup3 { + A _0; + B _1; + C _2; +}; + +/** + * MemData type for physical memory reads. + */ +using PhysicalReadData = CTup3>; + +/** + * FFI-safe 2 element tuple. + */ +template +struct CTup2 { + A _0; + B _1; +}; + +using ReadData = CTup2>; + +/** + * FFI compatible iterator. + * + * Any mutable reference to an iterator can be converted to a `CIterator`. + * + * `CIterator` implements `Iterator`. 
+ * + * # Examples + * + * Using [`AsCIterator`](AsCIterator) helper: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..10).map(|v| v * v); + * + * assert_eq!(sum_all(iter.as_citer()), 285); + * ``` + * + * Converting with `Into` trait: + * + * ``` + * use cglue::iter::{CIterator, AsCIterator}; + * + * extern "C" fn sum_all(iter: CIterator) -> usize { + * iter.sum() + * } + * + * let mut iter = (0..=10).map(|v| v * v); + * + * assert_eq!(sum_all((&mut iter).into()), 385); + * ``` + */ +template +struct CIterator { + void *iter; + int32_t (*func)(void*, T *out); + + class iterator : std::iterator { + CIterator *iter; + RustMaybeUninit data; + bool initialized = false; + bool end = false; + + public: + explicit iterator() : end(true) {} + + explicit iterator(CIterator *iter) : iter(iter) { + end = iter->func(iter->iter, &data.assume_init()); + } + + iterator &operator++() { + if (!iter || end) { + return *this; + } + + end = iter->func(iter->iter, &data.assume_init()); + + return *this; + } + + constexpr bool operator==(const iterator &other) const { + return (end && other.end) + || (!end && !other.end && data.assume_init() == other.data.assume_init()); + } + + constexpr bool operator!=(const iterator &other) const { + return !(*this == other); + } + + inline T &operator*() { + return data.assume_init(); + } + + constexpr const T &operator*() const { + return data.assume_init(); + } + }; + + constexpr iterator begin() { + return iterator(this); + } + + constexpr iterator end() { + return iterator(); + } +}; + +template +struct CPPIterator { + + typedef typename Container::iterator::value_type T; + + CIterator iter; + typename Container::iterator cur, end; + + static int32_t next(void *data, T *out) { + CPPIterator *i = (CPPIterator *)data; + + if (i->cur == i->end) { + return 1; + } else { + *out = *i->cur; + i->cur++; + return 0; + } + } + + 
CPPIterator(Container &cont) + : cur(cont.begin()), end(cont.end()) + { + iter.iter = &iter - offsetof(CPPIterator, iter); + iter.func = &CPPIterator::next; + } + + CPPIterator(CPPIterator &&o) { + iter = o.iter; + iter.iter = &this; + cur = o.cur; + end = o.end; + } + + CPPIterator(CPPIterator &o) { + iter = o.iter; + iter.iter = &this; + cur = o.cur; + end = o.end; + } + + inline operator CIterator &() { + return iter; + } +}; + +template +struct Callback { + T *context; + bool (*func)(T*, F); + + template + static bool push_back(Container *context, F data) { + context->push_back(data); + return true; + } + + template + static bool functional(Function *function, F data) { + return (*function)(data); + } + + Callback() = default; + + template().push_back(std::declval()))> + Callback(OT *cont) : + context((T *)cont), + func((decltype(func))(&Callback::push_back)) {} + + template()(std::declval()))> + Callback(const Function &function) : + context((T *)&function), + func((decltype(func))(&Callback::functional)) {} + + constexpr operator Callback &() { + return *((Callback *)this); + } +}; + +template +using OpaqueCallback = Callback; + +/** + * Data needed to perform memory operations. + * + * `inp` is an iterator containing + */ +template +struct MemOps { + CIterator inp; + OpaqueCallback

*out; + OpaqueCallback

*out_fail; +}; + +using PhysicalReadMemOps = MemOps; + +/** + * Wrapper around const slices. + * + * This is meant as a safe type to pass across the FFI boundary with similar semantics as regular + * slice. However, not all functionality is present, use the slice conversion functions. + * + * # Examples + * + * Simple conversion: + * + * ``` + * use cglue::slice::CSliceRef; + * + * let arr = [0, 5, 3, 2]; + * + * let cslice = CSliceRef::from(&arr[..]); + * + * let slice = cslice.as_slice(); + * + * assert_eq!(&arr, slice); + * ``` + */ +template +struct CSliceRef { + const T *data; + uintptr_t len; + + CSliceRef () = default; + + template::value + && std::is_same::value + >::type> + CSliceRef (const Cont &data) : data(data.data()), len(data.size()) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceRef (const char *value) : data((const T *)value), len(strlen(value)) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceRef (const char *value, uintptr_t len) : data((const T *)value), len(len) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + CSliceRef (const std::string &value) : data((const T *)value.data()), len(value.length()) {} + + template::value || std::is_same::value) + && std::is_same::value + >::type> + inline operator std::string() const { + return std::string((char *)data, len); + } +}; + +/** + * MemData type for physical memory writes. + */ +using PhysicalWriteData = CTup3>; + +using WriteData = CTup2>; + +using PhysicalWriteMemOps = MemOps; + +struct PhysicalMemoryMetadata { + Address max_address; + umem real_size; + bool readonly; + uint32_t ideal_batch_size; +}; + +struct PhysicalMemoryMapping { + Address base; + umem size; + Address real_base; +}; + +/** + * Simple CGlue trait object container. + * + * This is the simplest form of container, represented by an instance, clone context, and + * temporary return context. 
+ * + * `instance` value usually is either a reference, or a mutable reference, or a `CBox`, which + * contains static reference to the instance, and a dedicated drop function for freeing resources. + * + * `context` is either `PhantomData` representing nothing, or typically a `CArc` that can be + * cloned at will, reference counting some resource, like a `Library` for automatic unloading. + * + * `ret_tmp` is usually `PhantomData` representing nothing, unless the trait has functions that + * return references to associated types, in which case space is reserved for wrapping structures. + */ +template +struct CGlueObjContainer { + typedef C Context; + T instance; + C context; + RustMaybeUninit ret_tmp; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct CGlueObjContainer { + typedef void Context; + T instance; + RustMaybeUninit ret_tmp; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +template +struct CGlueObjContainer { + typedef C Context; + T instance; + C context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct CGlueObjContainer { + typedef void Context; + T instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * MemData type for regular memory reads. 
+ */ +using ReadDataRaw = CTup3>; + +using ReadRawMemOps = MemOps; + +/** + * MemData type for regular memory writes. + */ +using WriteDataRaw = CTup3>; + +using WriteRawMemOps = MemOps; + +struct MemoryViewMetadata { + Address max_address; + umem real_size; + bool readonly; + bool little_endian; + uint8_t arch_bits; +}; + +using ReadCallback = OpaqueCallback; + +using WriteCallback = OpaqueCallback; + +/** + * CGlue vtable for trait MemoryView. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct MemoryViewVtbl { + typedef typename CGlueC::Context Context; + int32_t (*read_raw_iter)(CGlueC *cont, ReadRawMemOps data); + int32_t (*write_raw_iter)(CGlueC *cont, WriteRawMemOps data); + MemoryViewMetadata (*metadata)(const CGlueC *cont); + int32_t (*read_iter)(CGlueC *cont, + CIterator inp, + ReadCallback *out, + ReadCallback *out_fail); + int32_t (*read_raw_list)(CGlueC *cont, CSliceMut data); + int32_t (*read_raw_into)(CGlueC *cont, Address addr, CSliceMut out); + int32_t (*write_iter)(CGlueC *cont, + CIterator inp, + WriteCallback *out, + WriteCallback *out_fail); + int32_t (*write_raw_list)(CGlueC *cont, CSliceRef data); + int32_t (*write_raw)(CGlueC *cont, Address addr, CSliceRef data); +}; + +template +struct MemoryViewVtblImpl : MemoryViewVtbl { +constexpr MemoryViewVtblImpl() : + MemoryViewVtbl { + &Impl::read_raw_iter, + &Impl::write_raw_iter, + &Impl::metadata, + &Impl::read_iter, + &Impl::read_raw_list, + &Impl::read_raw_into, + &Impl::write_iter, + &Impl::write_raw_list, + &Impl::write_raw + } {} +}; + +/** + * Simple CGlue trait object. + * + * This is the simplest form of CGlue object, represented by a container and vtable for a single + * trait. + * + * Container merely is a this pointer with some optional temporary return reference context. + */ +template +struct CGlueTraitObj { + const V *vtbl; + CGlueObjContainer container; +}; + +/** + * Base CGlue trait object for trait MemoryView. 
+ */ +template, typename CGlueCtx = CArc> +using MemoryViewBase = CGlueTraitObj>>, CGlueCtx, MemoryViewRetTmp>; + +/** + * CGlue vtable for trait PhysicalMemory. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct PhysicalMemoryVtbl { + typedef typename CGlueC::Context Context; + int32_t (*phys_read_raw_iter)(CGlueC *cont, PhysicalReadMemOps data); + int32_t (*phys_write_raw_iter)(CGlueC *cont, PhysicalWriteMemOps data); + PhysicalMemoryMetadata (*metadata)(const CGlueC *cont); + void (*set_mem_map)(CGlueC *cont, CSliceRef _mem_map); + MemoryViewBase, Context> (*into_phys_view)(CGlueC cont); + MemoryViewBase, Context> (*phys_view)(CGlueC *cont); +}; + +template +struct PhysicalMemoryVtblImpl : PhysicalMemoryVtbl { +constexpr PhysicalMemoryVtblImpl() : + PhysicalMemoryVtbl { + &Impl::phys_read_raw_iter, + &Impl::phys_write_raw_iter, + &Impl::metadata, + &Impl::set_mem_map, + &Impl::into_phys_view, + &Impl::phys_view + } {} +}; + +/** + * CGlue vtable for trait CpuState. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct CpuStateVtbl { + typedef typename CGlueC::Context Context; + void (*pause)(CGlueC *cont); + void (*resume)(CGlueC *cont); +}; + +template +struct CpuStateVtblImpl : CpuStateVtbl { +constexpr CpuStateVtblImpl() : + CpuStateVtbl { + &Impl::pause, + &Impl::resume + } {} +}; + +/** + * Base CGlue trait object for trait CpuState. 
+ */ +template, typename CGlueCtx = CArc> +using CpuStateBase = CGlueTraitObj>>, CGlueCtx, CpuStateRetTmp>; + +template, typename CGlueCtx = CArc> +struct IntoCpuStateContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct IntoCpuStateContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + CpuState < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoCpuState` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. 
+ */ +template, typename CGlueCtx = CArc> +struct IntoCpuState { + const CloneVtbl> *vtbl_clone; + const CpuStateVtbl> *vtbl_cpustate; + IntoCpuStateContainer container; + + IntoCpuState() : container{} , vtbl_clone{}, vtbl_cpustate{} {} + + ~IntoCpuState() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline IntoCpuState clone() const noexcept { + IntoCpuState __ret; + __ret.vtbl_clone = this->vtbl_clone; + __ret.vtbl_cpustate = this->vtbl_cpustate; + __ret.container = (this->vtbl_clone)->clone(&this->container); + return __ret; + } + + inline void pause() noexcept { + (this->vtbl_cpustate)->pause(&this->container); + + } + + inline void resume() noexcept { + (this->vtbl_cpustate)->resume(&this->container); + + } + +}; + +/** + * CGlue vtable for trait ConnectorCpuState. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct ConnectorCpuStateVtbl { + typedef typename CGlueC::Context Context; + int32_t (*cpu_state)(CGlueC *cont, CpuStateBase, Context> *ok_out); + int32_t (*into_cpu_state)(CGlueC cont, IntoCpuState, Context> *ok_out); +}; + +template +struct ConnectorCpuStateVtblImpl : ConnectorCpuStateVtbl { +constexpr ConnectorCpuStateVtblImpl() : + ConnectorCpuStateVtbl { + &Impl::cpu_state, + &Impl::into_cpu_state + } {} +}; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + PhysicalMemory < > + ConnectorCpuState < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. 
+ * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `ConnectorInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +template, typename CGlueCtx = CArc> +struct ConnectorInstance { + const CloneVtbl> *vtbl_clone; + const PhysicalMemoryVtbl> *vtbl_physicalmemory; + const ConnectorCpuStateVtbl> *vtbl_connectorcpustate; + ConnectorInstanceContainer container; + + ConnectorInstance() : container{} , vtbl_clone{}, vtbl_physicalmemory{}, vtbl_connectorcpustate{} {} + + ~ConnectorInstance() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline ConnectorInstance clone() const noexcept { + ConnectorInstance __ret; + __ret.vtbl_clone = this->vtbl_clone; + __ret.vtbl_physicalmemory = this->vtbl_physicalmemory; + __ret.vtbl_connectorcpustate = this->vtbl_connectorcpustate; + __ret.container = (this->vtbl_clone)->clone(&this->container); + return __ret; + } + + inline int32_t phys_read_raw_iter(PhysicalReadMemOps data) noexcept { + int32_t __ret = (this->vtbl_physicalmemory)->phys_read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t phys_write_raw_iter(PhysicalWriteMemOps data) noexcept { + int32_t __ret = (this->vtbl_physicalmemory)->phys_write_raw_iter(&this->container, data); + return __ret; + } + + inline PhysicalMemoryMetadata metadata() const noexcept { + PhysicalMemoryMetadata __ret = (this->vtbl_physicalmemory)->metadata(&this->container); + return __ret; + } + + inline void set_mem_map(CSliceRef _mem_map) noexcept { + (this->vtbl_physicalmemory)->set_mem_map(&this->container, _mem_map); + + } + + inline MemoryViewBase, Context> into_phys_view() && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; 
+ MemoryViewBase, Context> __ret = (this->vtbl_physicalmemory)->into_phys_view(this->container); + mem_forget(this->container); + return __ret; + } + + inline MemoryViewBase, Context> phys_view() noexcept { + MemoryViewBase, Context> __ret = (this->vtbl_physicalmemory)->phys_view(&this->container); + return __ret; + } + + inline int32_t cpu_state(CpuStateBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_connectorcpustate)->cpu_state(&this->container, ok_out); + return __ret; + } + + inline int32_t into_cpu_state(IntoCpuState, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_connectorcpustate)->into_cpu_state(this->container, ok_out); + mem_forget(this->container); + return __ret; + } + +}; + +template> +using ConnectorInstanceBaseCtxBox = ConnectorInstance, CGlueCtx>; + +template +using ConnectorInstanceBaseArcBox = ConnectorInstanceBaseCtxBox>; +// Typedef for default contaienr and context type +template +using ConnectorInstanceBase = ConnectorInstanceBaseArcBox; + +using ConnectorInstanceArcBox = ConnectorInstanceBaseArcBox; + +using MuConnectorInstanceArcBox = ConnectorInstanceArcBox; +// Typedef for default contaienr and context type +using MuConnectorInstance = MuConnectorInstanceArcBox; + +template, typename CGlueCtx = CArc> +struct OsInstanceContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct OsInstanceContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } 
+}; + +using AddressCallback = OpaqueCallback

; + +/** + * Type meant for process IDs + * + * If there is a case where Pid can be over 32-bit limit, or negative, please open an issue, we + * would love to see that. + */ +using Pid = uint32_t; + +/** + * Exit code of a process + */ +using ExitCode = int32_t; + +/** + * The state of a process + * + * # Remarks + * + * In case the exit code isn't known ProcessState::Unknown is set. + */ +struct ProcessState { + enum class Tag { + ProcessState_Unknown, + ProcessState_Alive, + ProcessState_Dead, + }; + + struct ProcessState_Dead_Body { + ExitCode _0; + }; + + Tag tag; + union { + ProcessState_Dead_Body dead; + }; +}; + +/** + * Wrapper around null-terminated C-style strings. + * + * Analog to Rust's `String`, [`ReprCString`] owns the underlying data. + */ +using ReprCString = char*; + +struct ArchitectureIdent { + enum class Tag { + /** + * Unknown architecture. Could be third-party implemented. memflow knows how to work on them, + * but is unable to instantiate them. + */ + ArchitectureIdent_Unknown, + /** + * X86 with specified bitness and address extensions + * + * First argument - `bitness` controls whether it's 32, or 64 bit variant. + * Second argument - `address_extensions` control whether address extensions are + * enabled (PAE on x32, or LA57 on x64). Warning: LA57 is currently unsupported. + */ + ArchitectureIdent_X86, + /** + * Arm 64-bit architecture with specified page size + * + * Valid page sizes are 4kb, 16kb, 64kb. Only 4kb is supported at the moment + */ + ArchitectureIdent_AArch64, + }; + + struct ArchitectureIdent_Unknown_Body { + uintptr_t _0; + }; + + struct ArchitectureIdent_X86_Body { + uint8_t _0; + bool _1; + }; + + struct ArchitectureIdent_AArch64_Body { + uintptr_t _0; + }; + + Tag tag; + union { + ArchitectureIdent_Unknown_Body unknown; + ArchitectureIdent_X86_Body x86; + ArchitectureIdent_AArch64_Body a_arch64; + }; +}; + +/** + * Process information structure + * + * This structure implements basic process information. 
Architectures are provided both of the + * system, and of the process. + */ +struct ProcessInfo { + /** + * The base address of this process. + * + * # Remarks + * + * On Windows this will be the address of the [`_EPROCESS`](https://www.nirsoft.net/kernel_struct/vista/EPROCESS.html) structure. + */ + Address address; + /** + * ID of this process. + */ + Pid pid; + /** + * The current status of the process at the time when this process info was fetched. + * + * # Remarks + * + * This field is highly volatile and can be re-checked with the [`Process::state()`] function. + */ + ProcessState state; + /** + * Name of the process. + */ + ReprCString name; + /** + * Path of the process binary + */ + ReprCString path; + /** + * Command line the process was started with. + */ + ReprCString command_line; + /** + * System architecture of the target system. + */ + ArchitectureIdent sys_arch; + /** + * Process architecture + * + * # Remarks + * + * Specifically on 64-bit systems this could be different + * to the `sys_arch` in case the process is an emulated 32-bit process. + * + * On windows this technique is called [`WOW64`](https://docs.microsoft.com/en-us/windows/win32/winprog64/wow64-implementation-details). + */ + ArchitectureIdent proc_arch; + /** + * Directory Table Base + * + * # Remarks + * + * These fields contain the translation base used to translate virtual memory addresses into physical memory addresses. + * On x86 systems only `dtb1` is set because only one dtb is used. + * On arm systems both `dtb1` and `dtb2` are set to their corresponding values. 
+ */ + Address dtb1; + Address dtb2; +}; + +using ProcessInfoCallback = OpaqueCallback; + +template, typename CGlueCtx = CArc> +struct ProcessInstanceContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct ProcessInstanceContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * Pair of address and architecture used for callbacks + */ +struct ModuleAddressInfo { + Address address; + ArchitectureIdent arch; +}; + +using ModuleAddressCallback = OpaqueCallback; + +/** + * Module information structure + */ +struct ModuleInfo { + /** + * Returns the address of the module header. + * + * # Remarks + * + * On Windows this will be the address where the [`PEB`](https://docs.microsoft.com/en-us/windows/win32/api/winternl/ns-winternl-peb) entry is stored. + */ + Address address; + /** + * The base address of the parent process. + * + * # Remarks + * + * This field is analog to the `ProcessInfo::address` field. + */ + Address parent_process; + /** + * The actual base address of this module. + * + * # Remarks + * + * The base address is contained in the virtual address range of the process + * this module belongs to. + */ + Address base; + /** + * Size of the module + */ + umem size; + /** + * Name of the module + */ + ReprCString name; + /** + * Path of the module + */ + ReprCString path; + /** + * Architecture of the module + * + * # Remarks + * + * Emulated processes often have 2 separate lists of modules, one visible to the emulated + * context (e.g. 
all 32-bit modules in a WoW64 process), and the other for all native modules + * needed to support the process emulation. This should be equal to either + * `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch` of the parent process. + */ + ArchitectureIdent arch; +}; + +using ModuleInfoCallback = OpaqueCallback; + +/** + * Import information structure + */ +struct ImportInfo { + /** + * Name of the import + */ + ReprCString name; + /** + * Offset of this import from the containing modules base address + */ + umem offset; +}; + +using ImportCallback = OpaqueCallback; + +/** + * Export information structure + */ +struct ExportInfo { + /** + * Name of the export + */ + ReprCString name; + /** + * Offset of this export from the containing modules base address + */ + umem offset; +}; + +using ExportCallback = OpaqueCallback; + +/** + * Section information structure + */ +struct SectionInfo { + /** + * Name of the section + */ + ReprCString name; + /** + * Virtual address of this section (essentially module_info.base + virtual_address) + */ + Address base; + /** + * Size of this section + */ + umem size; +}; + +using SectionCallback = OpaqueCallback; + +using imem = int64_t; + +using MemoryRange = CTup3; + +using MemoryRangeCallback = OpaqueCallback; + +/** + * CGlue vtable for trait Process. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +template +struct ProcessVtbl { + typedef typename CGlueC::Context Context; + ProcessState (*state)(CGlueC *cont); + int32_t (*set_dtb)(CGlueC *cont, Address dtb1, Address dtb2); + int32_t (*module_address_list_callback)(CGlueC *cont, + const ArchitectureIdent *target_arch, + ModuleAddressCallback callback); + int32_t (*module_list_callback)(CGlueC *cont, + const ArchitectureIdent *target_arch, + ModuleInfoCallback callback); + int32_t (*module_by_address)(CGlueC *cont, + Address address, + ArchitectureIdent architecture, + ModuleInfo *ok_out); + int32_t (*module_by_name_arch)(CGlueC *cont, + CSliceRef name, + const ArchitectureIdent *architecture, + ModuleInfo *ok_out); + int32_t (*module_by_name)(CGlueC *cont, CSliceRef name, ModuleInfo *ok_out); + int32_t (*primary_module_address)(CGlueC *cont, Address *ok_out); + int32_t (*primary_module)(CGlueC *cont, ModuleInfo *ok_out); + int32_t (*module_import_list_callback)(CGlueC *cont, + const ModuleInfo *info, + ImportCallback callback); + int32_t (*module_export_list_callback)(CGlueC *cont, + const ModuleInfo *info, + ExportCallback callback); + int32_t (*module_section_list_callback)(CGlueC *cont, + const ModuleInfo *info, + SectionCallback callback); + int32_t (*module_import_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + ImportInfo *ok_out); + int32_t (*module_export_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + ExportInfo *ok_out); + int32_t (*module_section_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + SectionInfo *ok_out); + const ProcessInfo *(*info)(const CGlueC *cont); + void (*mapped_mem_range)(CGlueC *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + void (*mapped_mem)(CGlueC *cont, imem gap_size, MemoryRangeCallback out); +}; + +template +struct ProcessVtblImpl : ProcessVtbl { +constexpr ProcessVtblImpl() : + ProcessVtbl { + &Impl::state, + &Impl::set_dtb, + &Impl::module_address_list_callback, + 
&Impl::module_list_callback, + &Impl::module_by_address, + &Impl::module_by_name_arch, + &Impl::module_by_name, + &Impl::primary_module_address, + &Impl::primary_module, + &Impl::module_import_list_callback, + &Impl::module_export_list_callback, + &Impl::module_section_list_callback, + &Impl::module_import_by_name, + &Impl::module_export_by_name, + &Impl::module_section_by_name, + &Impl::info, + &Impl::mapped_mem_range, + &Impl::mapped_mem + } {} +}; + +using VtopRange = CTup2; + +/** + * Virtual page range information with physical mappings used for callbacks + */ +struct VirtualTranslation { + Address in_virtual; + umem size; + PhysicalAddress out_physical; +}; + +using VirtualTranslationCallback = OpaqueCallback; + +struct VirtualTranslationFail { + Address from; + umem size; +}; + +using VirtualTranslationFailCallback = OpaqueCallback; + +/** + * A `Page` holds information about a memory page. + * + * More information about paging can be found [here](https://en.wikipedia.org/wiki/Paging). + */ +struct Page { + /** + * Contains the page type (see above). + */ + PageType page_type; + /** + * Contains the base address of this page. + */ + Address page_base; + /** + * Contains the size of this page. + */ + umem page_size; +}; +/** + * A page object that is invalid. + */ +constexpr static const Page Page_INVALID = Page{ /* .page_type = */ PageType_UNKNOWN, /* .page_base = */ Address_INVALID, /* .page_size = */ 0 }; + +/** + * FFI-safe Option. + * + * This type is not really meant for general use, but rather as a last-resort conversion for type + * wrapping. + * + * Typical workflow would include temporarily converting into/from COption. + */ +template +struct COption { + enum class Tag { + COption_None, + COption_Some, + }; + + struct COption_Some_Body { + T _0; + }; + + Tag tag; + union { + COption_Some_Body some; + }; +}; + +/** + * CGlue vtable for trait VirtualTranslate. + * + * This virtual function table contains ABI-safe interface for the given trait. 
+ */ +template +struct VirtualTranslateVtbl { + typedef typename CGlueC::Context Context; + void (*virt_to_phys_list)(CGlueC *cont, + CSliceRef addrs, + VirtualTranslationCallback out, + VirtualTranslationFailCallback out_fail); + void (*virt_to_phys_range)(CGlueC *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_translation_map_range)(CGlueC *cont, + Address start, + Address end, + VirtualTranslationCallback out); + void (*virt_page_map_range)(CGlueC *cont, + imem gap_size, + Address start, + Address end, + MemoryRangeCallback out); + int32_t (*virt_to_phys)(CGlueC *cont, Address address, PhysicalAddress *ok_out); + int32_t (*virt_page_info)(CGlueC *cont, Address addr, Page *ok_out); + void (*virt_translation_map)(CGlueC *cont, VirtualTranslationCallback out); + COption
(*phys_to_virt)(CGlueC *cont, Address phys); + void (*virt_page_map)(CGlueC *cont, imem gap_size, MemoryRangeCallback out); +}; + +template +struct VirtualTranslateVtblImpl : VirtualTranslateVtbl { +constexpr VirtualTranslateVtblImpl() : + VirtualTranslateVtbl { + &Impl::virt_to_phys_list, + &Impl::virt_to_phys_range, + &Impl::virt_translation_map_range, + &Impl::virt_page_map_range, + &Impl::virt_to_phys, + &Impl::virt_page_info, + &Impl::virt_translation_map, + &Impl::phys_to_virt, + &Impl::virt_page_map + } {} +}; + +/** + * Trait group potentially implementing `MemoryView < > + Process < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `ProcessInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. 
+ */ +template, typename CGlueCtx = CArc> +struct ProcessInstance { + const MemoryViewVtbl> *vtbl_memoryview; + const ProcessVtbl> *vtbl_process; + const VirtualTranslateVtbl> *vtbl_virtualtranslate; + ProcessInstanceContainer container; + + ProcessInstance() : container{} , vtbl_memoryview{}, vtbl_process{}, vtbl_virtualtranslate{} {} + + ~ProcessInstance() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline int32_t read_raw_iter(ReadRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t write_raw_iter(WriteRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_iter(&this->container, data); + return __ret; + } + + inline MemoryViewMetadata metadata() const noexcept { + MemoryViewMetadata __ret = (this->vtbl_memoryview)->metadata(&this->container); + return __ret; + } + + inline int32_t read_iter(CIterator inp, ReadCallback * out, ReadCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t read_raw_list(CSliceMut data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_list(&this->container, data); + return __ret; + } + + inline int32_t read_raw_into(Address addr, CSliceMut out) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_into(&this->container, addr, out); + return __ret; + } + + inline int32_t write_iter(CIterator inp, WriteCallback * out, WriteCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t write_raw_list(CSliceRef data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_list(&this->container, data); + return __ret; + } + + inline int32_t write_raw(Address addr, CSliceRef data) noexcept { + int32_t __ret = 
(this->vtbl_memoryview)->write_raw(&this->container, addr, data); + return __ret; + } + + inline ProcessState state() noexcept { + ProcessState __ret = (this->vtbl_process)->state(&this->container); + return __ret; + } + + inline int32_t set_dtb(Address dtb1, Address dtb2) noexcept { + int32_t __ret = (this->vtbl_process)->set_dtb(&this->container, dtb1, dtb2); + return __ret; + } + + inline int32_t module_address_list_callback(const ArchitectureIdent * target_arch, ModuleAddressCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_address_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_list_callback(const ArchitectureIdent * target_arch, ModuleInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_by_address(Address address, ArchitectureIdent architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_address(&this->container, address, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name_arch(CSliceRef name, const ArchitectureIdent * architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_name_arch(&this->container, name, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name(CSliceRef name, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t primary_module_address(Address * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->primary_module_address(&this->container, ok_out); + return __ret; + } + + inline int32_t primary_module(ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->primary_module(&this->container, ok_out); + return __ret; + } + + inline int32_t module_import_list_callback(const ModuleInfo * info, 
ImportCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_import_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_export_list_callback(const ModuleInfo * info, ExportCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_export_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_section_list_callback(const ModuleInfo * info, SectionCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_section_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_import_by_name(const ModuleInfo * info, CSliceRef name, ImportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_import_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_export_by_name(const ModuleInfo * info, CSliceRef name, ExportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_export_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_section_by_name(const ModuleInfo * info, CSliceRef name, SectionInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_section_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline const ProcessInfo * info() const noexcept { + const ProcessInfo * __ret = (this->vtbl_process)->info(&this->container); + return __ret; + } + + inline void mapped_mem_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl_process)->mapped_mem_range(&this->container, gap_size, start, end, out); + + } + + inline void mapped_mem(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl_process)->mapped_mem(&this->container, gap_size, out); + + } + + inline void virt_to_phys_list(CSliceRef addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) noexcept { + 
(this->vtbl_virtualtranslate)->virt_to_phys_list(&this->container, addrs, out, out_fail); + + } + + inline void virt_to_phys_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_to_phys_range(&this->container, start, end, out); + + } + + inline void virt_translation_map_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map_range(&this->container, start, end, out); + + } + + inline void virt_page_map_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map_range(&this->container, gap_size, start, end, out); + + } + + inline int32_t virt_to_phys(Address address, PhysicalAddress * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_to_phys(&this->container, address, ok_out); + return __ret; + } + + inline int32_t virt_page_info(Address addr, Page * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_page_info(&this->container, addr, ok_out); + return __ret; + } + + inline void virt_translation_map(VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map(&this->container, out); + + } + + inline COption
phys_to_virt(Address phys) noexcept { + COption
__ret = (this->vtbl_virtualtranslate)->phys_to_virt(&this->container, phys); + return __ret; + } + + inline void virt_page_map(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map(&this->container, gap_size, out); + + } + +}; + +template, typename CGlueCtx = CArc> +struct IntoProcessInstanceContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct IntoProcessInstanceContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + MemoryView < > + Process < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoProcessInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. 
+ */ +template, typename CGlueCtx = CArc> +struct IntoProcessInstance { + const CloneVtbl> *vtbl_clone; + const MemoryViewVtbl> *vtbl_memoryview; + const ProcessVtbl> *vtbl_process; + const VirtualTranslateVtbl> *vtbl_virtualtranslate; + IntoProcessInstanceContainer container; + + IntoProcessInstance() : container{} , vtbl_clone{}, vtbl_memoryview{}, vtbl_process{}, vtbl_virtualtranslate{} {} + + ~IntoProcessInstance() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline IntoProcessInstance clone() const noexcept { + IntoProcessInstance __ret; + __ret.vtbl_clone = this->vtbl_clone; + __ret.vtbl_memoryview = this->vtbl_memoryview; + __ret.vtbl_process = this->vtbl_process; + __ret.vtbl_virtualtranslate = this->vtbl_virtualtranslate; + __ret.container = (this->vtbl_clone)->clone(&this->container); + return __ret; + } + + inline int32_t read_raw_iter(ReadRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t write_raw_iter(WriteRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_iter(&this->container, data); + return __ret; + } + + inline MemoryViewMetadata metadata() const noexcept { + MemoryViewMetadata __ret = (this->vtbl_memoryview)->metadata(&this->container); + return __ret; + } + + inline int32_t read_iter(CIterator inp, ReadCallback * out, ReadCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t read_raw_list(CSliceMut data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_list(&this->container, data); + return __ret; + } + + inline int32_t read_raw_into(Address addr, CSliceMut out) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_into(&this->container, addr, out); + return __ret; + } + + inline int32_t write_iter(CIterator inp, WriteCallback * out, WriteCallback * 
out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t write_raw_list(CSliceRef data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_list(&this->container, data); + return __ret; + } + + inline int32_t write_raw(Address addr, CSliceRef data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw(&this->container, addr, data); + return __ret; + } + + inline ProcessState state() noexcept { + ProcessState __ret = (this->vtbl_process)->state(&this->container); + return __ret; + } + + inline int32_t set_dtb(Address dtb1, Address dtb2) noexcept { + int32_t __ret = (this->vtbl_process)->set_dtb(&this->container, dtb1, dtb2); + return __ret; + } + + inline int32_t module_address_list_callback(const ArchitectureIdent * target_arch, ModuleAddressCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_address_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_list_callback(const ArchitectureIdent * target_arch, ModuleInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_by_address(Address address, ArchitectureIdent architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_address(&this->container, address, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name_arch(CSliceRef name, const ArchitectureIdent * architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_name_arch(&this->container, name, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name(CSliceRef name, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t 
primary_module_address(Address * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->primary_module_address(&this->container, ok_out); + return __ret; + } + + inline int32_t primary_module(ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->primary_module(&this->container, ok_out); + return __ret; + } + + inline int32_t module_import_list_callback(const ModuleInfo * info, ImportCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_import_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_export_list_callback(const ModuleInfo * info, ExportCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_export_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_section_list_callback(const ModuleInfo * info, SectionCallback callback) noexcept { + int32_t __ret = (this->vtbl_process)->module_section_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_import_by_name(const ModuleInfo * info, CSliceRef name, ImportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_import_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_export_by_name(const ModuleInfo * info, CSliceRef name, ExportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_export_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_section_by_name(const ModuleInfo * info, CSliceRef name, SectionInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_process)->module_section_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline const ProcessInfo * info() const noexcept { + const ProcessInfo * __ret = (this->vtbl_process)->info(&this->container); + return __ret; + } + + inline void mapped_mem_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + 
(this->vtbl_process)->mapped_mem_range(&this->container, gap_size, start, end, out); + + } + + inline void mapped_mem(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl_process)->mapped_mem(&this->container, gap_size, out); + + } + + inline void virt_to_phys_list(CSliceRef addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) noexcept { + (this->vtbl_virtualtranslate)->virt_to_phys_list(&this->container, addrs, out, out_fail); + + } + + inline void virt_to_phys_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_to_phys_range(&this->container, start, end, out); + + } + + inline void virt_translation_map_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map_range(&this->container, start, end, out); + + } + + inline void virt_page_map_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map_range(&this->container, gap_size, start, end, out); + + } + + inline int32_t virt_to_phys(Address address, PhysicalAddress * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_to_phys(&this->container, address, ok_out); + return __ret; + } + + inline int32_t virt_page_info(Address addr, Page * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_page_info(&this->container, addr, ok_out); + return __ret; + } + + inline void virt_translation_map(VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map(&this->container, out); + + } + + inline COption
phys_to_virt(Address phys) noexcept { + COption
__ret = (this->vtbl_virtualtranslate)->phys_to_virt(&this->container, phys); + return __ret; + } + + inline void virt_page_map(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map(&this->container, gap_size, out); + + } + +}; + +/** + * Information block about OS + * + * This provides some basic information about the OS in question. `base`, and `size` may be + * omitted in some circumstances (lack of kernel, or privileges). But architecture should always + * be correct. + */ +struct OsInfo { + /** + * Base address of the OS kernel + */ + Address base; + /** + * Size of the OS kernel + */ + umem size; + /** + * System architecture + */ + ArchitectureIdent arch; +}; + +/** + * CGlue vtable for trait Os. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct OsVtbl { + typedef typename CGlueC::Context Context; + int32_t (*process_address_list_callback)(CGlueC *cont, AddressCallback callback); + int32_t (*process_info_list_callback)(CGlueC *cont, ProcessInfoCallback callback); + int32_t (*process_info_by_address)(CGlueC *cont, + Address address, + ProcessInfo *ok_out); + int32_t (*process_info_by_name)(CGlueC *cont, + CSliceRef name, + ProcessInfo *ok_out); + int32_t (*process_info_by_pid)(CGlueC *cont, Pid pid, ProcessInfo *ok_out); + int32_t (*process_by_info)(CGlueC *cont, + ProcessInfo info, + ProcessInstance, Context> *ok_out); + int32_t (*into_process_by_info)(CGlueC cont, + ProcessInfo info, + IntoProcessInstance, Context> *ok_out); + int32_t (*process_by_address)(CGlueC *cont, + Address addr, + ProcessInstance, Context> *ok_out); + int32_t (*process_by_name)(CGlueC *cont, + CSliceRef name, + ProcessInstance, Context> *ok_out); + int32_t (*process_by_pid)(CGlueC *cont, + Pid pid, + ProcessInstance, Context> *ok_out); + int32_t (*into_process_by_address)(CGlueC cont, + Address addr, + IntoProcessInstance, Context> *ok_out); + int32_t (*into_process_by_name)(CGlueC 
cont, + CSliceRef name, + IntoProcessInstance, Context> *ok_out); + int32_t (*into_process_by_pid)(CGlueC cont, + Pid pid, + IntoProcessInstance, Context> *ok_out); + int32_t (*module_address_list_callback)(CGlueC *cont, AddressCallback callback); + int32_t (*module_list_callback)(CGlueC *cont, ModuleInfoCallback callback); + int32_t (*module_by_address)(CGlueC *cont, Address address, ModuleInfo *ok_out); + int32_t (*module_by_name)(CGlueC *cont, CSliceRef name, ModuleInfo *ok_out); + int32_t (*primary_module_address)(CGlueC *cont, Address *ok_out); + int32_t (*primary_module)(CGlueC *cont, ModuleInfo *ok_out); + int32_t (*module_import_list_callback)(CGlueC *cont, + const ModuleInfo *info, + ImportCallback callback); + int32_t (*module_export_list_callback)(CGlueC *cont, + const ModuleInfo *info, + ExportCallback callback); + int32_t (*module_section_list_callback)(CGlueC *cont, + const ModuleInfo *info, + SectionCallback callback); + int32_t (*module_import_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + ImportInfo *ok_out); + int32_t (*module_export_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + ExportInfo *ok_out); + int32_t (*module_section_by_name)(CGlueC *cont, + const ModuleInfo *info, + CSliceRef name, + SectionInfo *ok_out); + const OsInfo *(*info)(const CGlueC *cont); +}; + +template +struct OsVtblImpl : OsVtbl { +constexpr OsVtblImpl() : + OsVtbl { + &Impl::process_address_list_callback, + &Impl::process_info_list_callback, + &Impl::process_info_by_address, + &Impl::process_info_by_name, + &Impl::process_info_by_pid, + &Impl::process_by_info, + &Impl::into_process_by_info, + &Impl::process_by_address, + &Impl::process_by_name, + &Impl::process_by_pid, + &Impl::into_process_by_address, + &Impl::into_process_by_name, + &Impl::into_process_by_pid, + &Impl::module_address_list_callback, + &Impl::module_list_callback, + &Impl::module_by_address, + &Impl::module_by_name, + &Impl::primary_module_address, + 
&Impl::primary_module, + &Impl::module_import_list_callback, + &Impl::module_export_list_callback, + &Impl::module_section_list_callback, + &Impl::module_import_by_name, + &Impl::module_export_by_name, + &Impl::module_section_by_name, + &Impl::info + } {} +}; + +/** + * CGlue vtable for trait KeyboardState. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct KeyboardStateVtbl { + typedef typename CGlueC::Context Context; + bool (*is_down)(const CGlueC *cont, int32_t vk); +}; + +template +struct KeyboardStateVtblImpl : KeyboardStateVtbl { +constexpr KeyboardStateVtblImpl() : + KeyboardStateVtbl { + &Impl::is_down + } {} +}; + +/** + * Base CGlue trait object for trait KeyboardState. + */ +template, typename CGlueCtx = CArc> +using KeyboardStateBase = CGlueTraitObj>>, CGlueCtx, KeyboardStateRetTmp>; + +/** + * CGlue vtable for trait Keyboard. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct KeyboardVtbl { + typedef typename CGlueC::Context Context; + bool (*is_down)(CGlueC *cont, int32_t vk); + void (*set_down)(CGlueC *cont, int32_t vk, bool down); + int32_t (*state)(CGlueC *cont, KeyboardStateBase, Context> *ok_out); +}; + +template +struct KeyboardVtblImpl : KeyboardVtbl { +constexpr KeyboardVtblImpl() : + KeyboardVtbl { + &Impl::is_down, + &Impl::set_down, + &Impl::state + } {} +}; + +/** + * Base CGlue trait object for trait Keyboard. 
+ */ +template, typename CGlueCtx = CArc> +using KeyboardBase = CGlueTraitObj>>, CGlueCtx, KeyboardRetTmp>; + +template, typename CGlueCtx = CArc> +struct IntoKeyboardContainer { + typedef CGlueCtx Context; + CGlueInst instance; + CGlueCtx context; + + inline Context clone_context() noexcept { + return context.clone(); + } + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + mem_drop(std::move(context)); + } + + inline void forget() noexcept { + mem_forget(instance); + mem_forget(context); + } +}; + +template +struct IntoKeyboardContainer { + typedef void Context; + CGlueInst instance; + + inline Context clone_context() noexcept {} + + inline void drop() && noexcept { + mem_drop(std::move(instance)); + } + + inline void forget() noexcept { + mem_forget(instance); + } +}; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + Keyboard < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. + * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `IntoKeyboard` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. 
+ */ +template, typename CGlueCtx = CArc> +struct IntoKeyboard { + const CloneVtbl> *vtbl_clone; + const KeyboardVtbl> *vtbl_keyboard; + IntoKeyboardContainer container; + + IntoKeyboard() : container{} , vtbl_clone{}, vtbl_keyboard{} {} + + ~IntoKeyboard() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline IntoKeyboard clone() const noexcept { + IntoKeyboard __ret; + __ret.vtbl_clone = this->vtbl_clone; + __ret.vtbl_keyboard = this->vtbl_keyboard; + __ret.container = (this->vtbl_clone)->clone(&this->container); + return __ret; + } + + inline bool is_down(int32_t vk) noexcept { + bool __ret = (this->vtbl_keyboard)->is_down(&this->container, vk); + return __ret; + } + + inline void set_down(int32_t vk, bool down) noexcept { + (this->vtbl_keyboard)->set_down(&this->container, vk, down); + + } + + inline int32_t state(KeyboardStateBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_keyboard)->state(&this->container, ok_out); + return __ret; + } + +}; + +/** + * CGlue vtable for trait OsKeyboard. + * + * This virtual function table contains ABI-safe interface for the given trait. + */ +template +struct OsKeyboardVtbl { + typedef typename CGlueC::Context Context; + int32_t (*keyboard)(CGlueC *cont, KeyboardBase, Context> *ok_out); + int32_t (*into_keyboard)(CGlueC cont, IntoKeyboard, Context> *ok_out); +}; + +template +struct OsKeyboardVtblImpl : OsKeyboardVtbl { +constexpr OsKeyboardVtblImpl() : + OsKeyboardVtbl { + &Impl::keyboard, + &Impl::into_keyboard + } {} +}; + +/** + * Trait group potentially implementing `:: cglue :: ext :: core :: clone :: Clone < > + Os < > + MemoryView < > + OsKeyboard < > + PhysicalMemory < > + VirtualTranslate < >` traits. + * + * Optional traits are not implemented here, however. There are numerous conversion + * functions available for safely retrieving a concrete collection of traits. + * + * `check_impl_` functions allow to check if the object implements the wanted traits. 
+ * + * `into_impl_` functions consume the object and produce a new final structure that + * keeps only the required information. + * + * `cast_impl_` functions merely check and transform the object into a type that can + *be transformed back into `OsInstance` without losing data. + * + * `as_ref_`, and `as_mut_` functions obtain references to safe objects, but do not + * perform any memory transformations either. They are the safest to use, because + * there is no risk of accidentally consuming the whole object. + */ +template, typename CGlueCtx = CArc> +struct OsInstance { + const CloneVtbl> *vtbl_clone; + const OsVtbl> *vtbl_os; + const MemoryViewVtbl> *vtbl_memoryview; + const OsKeyboardVtbl> *vtbl_oskeyboard; + const PhysicalMemoryVtbl> *vtbl_physicalmemory; + const VirtualTranslateVtbl> *vtbl_virtualtranslate; + OsInstanceContainer container; + + OsInstance() : container{} , vtbl_clone{}, vtbl_os{}, vtbl_memoryview{}, vtbl_oskeyboard{}, vtbl_physicalmemory{}, vtbl_virtualtranslate{} {} + + ~OsInstance() noexcept { + mem_drop(std::move(container)); + } + + typedef CGlueCtx Context; + + inline OsInstance clone() const noexcept { + OsInstance __ret; + __ret.vtbl_clone = this->vtbl_clone; + __ret.vtbl_os = this->vtbl_os; + __ret.vtbl_memoryview = this->vtbl_memoryview; + __ret.vtbl_oskeyboard = this->vtbl_oskeyboard; + __ret.vtbl_physicalmemory = this->vtbl_physicalmemory; + __ret.vtbl_virtualtranslate = this->vtbl_virtualtranslate; + __ret.container = (this->vtbl_clone)->clone(&this->container); + return __ret; + } + + inline int32_t process_address_list_callback(AddressCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->process_address_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t process_info_list_callback(ProcessInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->process_info_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t process_info_by_address(Address 
address, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_info_by_address(&this->container, address, ok_out); + return __ret; + } + + inline int32_t process_info_by_name(CSliceRef name, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_info_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t process_info_by_pid(Pid pid, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_info_by_pid(&this->container, pid, ok_out); + return __ret; + } + + inline int32_t process_by_info(ProcessInfo info, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_by_info(&this->container, info, ok_out); + return __ret; + } + + inline int32_t into_process_by_info(ProcessInfo info, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_os)->into_process_by_info(this->container, info, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t process_by_address(Address addr, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_by_address(&this->container, addr, ok_out); + return __ret; + } + + inline int32_t process_by_name(CSliceRef name, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t process_by_pid(Pid pid, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->process_by_pid(&this->container, pid, ok_out); + return __ret; + } + + inline int32_t into_process_by_address(Address addr, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_os)->into_process_by_address(this->container, addr, ok_out); + mem_forget(this->container); + return __ret; + } + 
+ inline int32_t into_process_by_name(CSliceRef name, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_os)->into_process_by_name(this->container, name, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t into_process_by_pid(Pid pid, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_os)->into_process_by_pid(this->container, pid, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t module_address_list_callback(AddressCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->module_address_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t module_list_callback(ModuleInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->module_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t module_by_address(Address address, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->module_by_address(&this->container, address, ok_out); + return __ret; + } + + inline int32_t module_by_name(CSliceRef name, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->module_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t primary_module_address(Address * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->primary_module_address(&this->container, ok_out); + return __ret; + } + + inline int32_t primary_module(ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->primary_module(&this->container, ok_out); + return __ret; + } + + inline int32_t module_import_list_callback(const ModuleInfo * info, ImportCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->module_import_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t 
module_export_list_callback(const ModuleInfo * info, ExportCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->module_export_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_section_list_callback(const ModuleInfo * info, SectionCallback callback) noexcept { + int32_t __ret = (this->vtbl_os)->module_section_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_import_by_name(const ModuleInfo * info, CSliceRef name, ImportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->module_import_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_export_by_name(const ModuleInfo * info, CSliceRef name, ExportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->module_export_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_section_by_name(const ModuleInfo * info, CSliceRef name, SectionInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl_os)->module_section_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline const OsInfo * info() const noexcept { + const OsInfo * __ret = (this->vtbl_os)->info(&this->container); + return __ret; + } + + inline int32_t read_raw_iter(ReadRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t write_raw_iter(WriteRawMemOps data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_iter(&this->container, data); + return __ret; + } + + inline MemoryViewMetadata memoryview_metadata() const noexcept { + MemoryViewMetadata __ret = (this->vtbl_memoryview)->metadata(&this->container); + return __ret; + } + + inline int32_t read_iter(CIterator inp, ReadCallback * out, ReadCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline 
int32_t read_raw_list(CSliceMut data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_list(&this->container, data); + return __ret; + } + + inline int32_t read_raw_into(Address addr, CSliceMut out) noexcept { + int32_t __ret = (this->vtbl_memoryview)->read_raw_into(&this->container, addr, out); + return __ret; + } + + inline int32_t write_iter(CIterator inp, WriteCallback * out, WriteCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t write_raw_list(CSliceRef data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw_list(&this->container, data); + return __ret; + } + + inline int32_t write_raw(Address addr, CSliceRef data) noexcept { + int32_t __ret = (this->vtbl_memoryview)->write_raw(&this->container, addr, data); + return __ret; + } + + inline int32_t keyboard(KeyboardBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl_oskeyboard)->keyboard(&this->container, ok_out); + return __ret; + } + + inline int32_t into_keyboard(IntoKeyboard, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl_oskeyboard)->into_keyboard(this->container, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t phys_read_raw_iter(PhysicalReadMemOps data) noexcept { + int32_t __ret = (this->vtbl_physicalmemory)->phys_read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t phys_write_raw_iter(PhysicalWriteMemOps data) noexcept { + int32_t __ret = (this->vtbl_physicalmemory)->phys_write_raw_iter(&this->container, data); + return __ret; + } + + inline PhysicalMemoryMetadata physicalmemory_metadata() const noexcept { + PhysicalMemoryMetadata __ret = (this->vtbl_physicalmemory)->metadata(&this->container); + return __ret; + } + + inline void set_mem_map(CSliceRef _mem_map) noexcept { + 
(this->vtbl_physicalmemory)->set_mem_map(&this->container, _mem_map); + + } + + inline MemoryViewBase, Context> into_phys_view() && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + MemoryViewBase, Context> __ret = (this->vtbl_physicalmemory)->into_phys_view(this->container); + mem_forget(this->container); + return __ret; + } + + inline MemoryViewBase, Context> phys_view() noexcept { + MemoryViewBase, Context> __ret = (this->vtbl_physicalmemory)->phys_view(&this->container); + return __ret; + } + + inline void virt_to_phys_list(CSliceRef addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) noexcept { + (this->vtbl_virtualtranslate)->virt_to_phys_list(&this->container, addrs, out, out_fail); + + } + + inline void virt_to_phys_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_to_phys_range(&this->container, start, end, out); + + } + + inline void virt_translation_map_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map_range(&this->container, start, end, out); + + } + + inline void virt_page_map_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map_range(&this->container, gap_size, start, end, out); + + } + + inline int32_t virt_to_phys(Address address, PhysicalAddress * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_to_phys(&this->container, address, ok_out); + return __ret; + } + + inline int32_t virt_page_info(Address addr, Page * ok_out) noexcept { + int32_t __ret = (this->vtbl_virtualtranslate)->virt_page_info(&this->container, addr, ok_out); + return __ret; + } + + inline void virt_translation_map(VirtualTranslationCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_translation_map(&this->container, out); + + } + + inline COption
phys_to_virt(Address phys) noexcept { + COption
__ret = (this->vtbl_virtualtranslate)->phys_to_virt(&this->container, phys); + return __ret; + } + + inline void virt_page_map(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl_virtualtranslate)->virt_page_map(&this->container, gap_size, out); + + } + +}; + +template> +using OsInstanceBaseCtxBox = OsInstance, CGlueCtx>; + +template +using OsInstanceBaseArcBox = OsInstanceBaseCtxBox>; +// Typedef for default contaienr and context type +template +using OsInstanceBase = OsInstanceBaseArcBox; + +using OsInstanceArcBox = OsInstanceBaseArcBox; + +using MuOsInstanceArcBox = OsInstanceArcBox; +// Typedef for default contaienr and context type +using MuOsInstance = MuOsInstanceArcBox; + +template> +using ProcessInstanceBaseCtxBox = ProcessInstance, CGlueCtx>; + +template +using ProcessInstanceBaseArcBox = ProcessInstanceBaseCtxBox>; +// Typedef for default contaienr and context type +template +using ProcessInstanceBase = ProcessInstanceBaseArcBox; + +using ProcessInstanceArcBox = ProcessInstanceBaseArcBox; + +template> +using IntoProcessInstanceBaseCtxBox = IntoProcessInstance, CGlueCtx>; + +template +using IntoProcessInstanceBaseArcBox = IntoProcessInstanceBaseCtxBox>; +// Typedef for default contaienr and context type +template +using IntoProcessInstanceBase = IntoProcessInstanceBaseArcBox; + +using IntoProcessInstanceArcBox = IntoProcessInstanceBaseArcBox; + +/** + * CtxBoxed CGlue trait object for trait MemoryView with context. + */ +template> +using MemoryViewBaseCtxBox = MemoryViewBase, CGlueCtx>; + +/** + * Boxed CGlue trait object for trait MemoryView with a [`CArc`](cglue::arc::CArc) reference counted context. + */ +template +using MemoryViewBaseArcBox = MemoryViewBaseCtxBox>; + +/** + * Opaque Boxed CGlue trait object for trait MemoryView with a [`CArc`](cglue::arc::CArc) reference counted context. 
+ */ +using MemoryViewArcBox = MemoryViewBaseArcBox; +// Typedef for default contaienr and context type +using MemoryView = MemoryViewArcBox; + +extern "C" { + +extern const ArchitectureObj *X86_32; + +extern const ArchitectureObj *X86_32_PAE; + +extern const ArchitectureObj *X86_64; + +/** + * Initialize logging with selected logging level. + */ +void log_init(LevelFilter level_filter); + +/** + * Logs a error message via log::error! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_error(const char *s); + +/** + * Logs a warning message via log::warn! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_warn(const char *s); + +/** + * Logs a info message via log::info! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_info(const char *s); + +/** + * Logs a debug message via log::debug! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_debug(const char *s); + +/** + * Logs a trace message via log::trace! + * + * # Safety + * + * The provided string must be a valid null-terminated char array. + */ +void log_trace(const char *s); + +/** + * Logs an error code with custom log level. + */ +void log_errorcode(Level level, int32_t error); + +/** + * Logs an error with debug log level. + */ +void log_debug_errorcode(int32_t error); + +/** + * Sets new maximum log level. + * + * If `inventory` is supplied, the log level is also updated within all plugin instances. However, + * if it is not supplied, plugins will not have their log levels updated, potentially leading to + * lower performance, or less logging than expected. + */ +void log_set_max_level(LevelFilter level_filter, const Inventory *inventory); + +/** + * Helper to convert `Address` to a `PhysicalAddress` + * + * This will create a `PhysicalAddress` with `UNKNOWN` PageType. 
+ */ +PhysicalAddress addr_to_paddr(Address address); + +/** + * Create a new connector inventory + * + * This function will try to find connectors using PATH environment variable + * + * Note that all functions go through each directories, and look for a `memflow` directory, + * and search for libraries in those. + * + * # Safety + * + * Inventory is inherently unsafe, because it loads shared libraries which can not be + * guaranteed to be safe. + */ +Inventory *inventory_scan(); + +/** + * Create a new inventory with custom path string + * + * # Safety + * + * `path` must be a valid null terminated string + */ +Inventory *inventory_scan_path(const char *path); + +/** + * Add a directory to an existing inventory + * + * # Safety + * + * `dir` must be a valid null terminated string + */ +int32_t inventory_add_dir(Inventory *inv, const char *dir); + +/** + * Create a connector with given arguments + * + * This creates an instance of `ConnectorInstance`. + * + * This instance needs to be dropped using `connector_drop`. + * + * # Arguments + * + * * `name` - name of the connector to use + * * `args` - arguments to be passed to the connector upon its creation + * + * # Safety + * + * Both `name`, and `args` must be valid null terminated strings. + * + * Any error strings returned by the connector must not be outputed after the connector gets + * freed, because that operation could cause the underlying shared library to get unloaded. + */ +int32_t inventory_create_connector(Inventory *inv, + const char *name, + const char *args, + MuConnectorInstanceArcBox *out); + +/** + * Create a OS instance with given arguments + * + * This creates an instance of `KernelInstance`. + * + * This instance needs to be freed using `os_drop`. 
+ * + * # Arguments + * + * * `name` - name of the OS to use + * * `args` - arguments to be passed to the connector upon its creation + * * `mem` - a previously initialized connector instance + * * `out` - a valid memory location that will contain the resulting os-instance + * + * # Remarks + * + * The `mem` connector instance is being _moved_ into the os layer. + * This means upon calling `os_drop` it is not unnecessary to call `connector_drop` anymore. + * + * # Safety + * + * Both `name`, and `args` must be valid null terminated strings. + * + * Any error strings returned by the connector must not be outputed after the connector gets + * freed, because that operation could cause the underlying shared library to get unloaded. + */ +int32_t inventory_create_os(Inventory *inv, + const char *name, + const char *args, + ConnectorInstanceArcBox *mem, + MuOsInstanceArcBox *out); + +/** + * Free a os plugin + * + * # Safety + * + * `os` must point to a valid `OsInstance` that was created using one of the provided + * functions. + */ +void os_drop(OsInstanceArcBox *os); + +/** + * Clone a connector + * + * This method is useful when needing to perform multithreaded operations, as a connector is not + * guaranteed to be thread safe. Every single cloned instance also needs to be dropped using + * `connector_drop`. + * + * # Safety + * + * `conn` has to point to a a valid `CloneablePhysicalMemory` created by one of the provided + * functions. + */ +void connector_clone(const ConnectorInstanceArcBox *conn, MuConnectorInstanceArcBox *out); + +/** + * Free a connector instance + * + * # Safety + * + * `conn` has to point to a valid [`ConnectorInstance`](ConnectorInstanceArcBox) created by one of the provided + * functions. + * + * There has to be no instance of `PhysicalMemory` created from the input `conn`, because they + * will become invalid. 
+ */ +void connector_drop(ConnectorInstanceArcBox *conn); + +/** + * Free a connector inventory + * + * # Safety + * + * `inv` must point to a valid `Inventory` that was created using one of the provided + * functions. + */ +void inventory_free(Inventory *inv); + +uint8_t arch_bits(const ArchitectureObj *arch); + +Endianess arch_endianess(const ArchitectureObj *arch); + +uintptr_t arch_page_size(const ArchitectureObj *arch); + +uintptr_t arch_size_addr(const ArchitectureObj *arch); + +uint8_t arch_address_space_bits(const ArchitectureObj *arch); + +/** + * Free an architecture reference + * + * # Safety + * + * `arch` must be a valid heap allocated reference created by one of the API's functions. + */ +void arch_free(ArchitectureObj *arch); + +bool is_x86_arch(const ArchitectureObj *arch); + +} // extern "C" + + +template +struct CGlueTraitObj>, C, R> { + const CloneVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline CGlueTraitObj clone() const noexcept { + CGlueTraitObj __ret; + __ret.vtbl = this->vtbl; + __ret.container = (this->vtbl)->clone(&this->container); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const MemoryViewVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline int32_t read_raw_iter(ReadRawMemOps data) noexcept { + int32_t __ret = (this->vtbl)->read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t write_raw_iter(WriteRawMemOps data) noexcept { + int32_t __ret = (this->vtbl)->write_raw_iter(&this->container, data); + return __ret; + } + + inline MemoryViewMetadata metadata() const noexcept { + MemoryViewMetadata __ret = (this->vtbl)->metadata(&this->container); + return __ret; + } + + inline int32_t read_iter(CIterator inp, ReadCallback * out, 
ReadCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl)->read_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t read_raw_list(CSliceMut data) noexcept { + int32_t __ret = (this->vtbl)->read_raw_list(&this->container, data); + return __ret; + } + + inline int32_t read_raw_into(Address addr, CSliceMut out) noexcept { + int32_t __ret = (this->vtbl)->read_raw_into(&this->container, addr, out); + return __ret; + } + + inline int32_t write_iter(CIterator inp, WriteCallback * out, WriteCallback * out_fail) noexcept { + int32_t __ret = (this->vtbl)->write_iter(&this->container, inp, out, out_fail); + return __ret; + } + + inline int32_t write_raw_list(CSliceRef data) noexcept { + int32_t __ret = (this->vtbl)->write_raw_list(&this->container, data); + return __ret; + } + + inline int32_t write_raw(Address addr, CSliceRef data) noexcept { + int32_t __ret = (this->vtbl)->write_raw(&this->container, addr, data); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const PhysicalMemoryVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline int32_t phys_read_raw_iter(PhysicalReadMemOps data) noexcept { + int32_t __ret = (this->vtbl)->phys_read_raw_iter(&this->container, data); + return __ret; + } + + inline int32_t phys_write_raw_iter(PhysicalWriteMemOps data) noexcept { + int32_t __ret = (this->vtbl)->phys_write_raw_iter(&this->container, data); + return __ret; + } + + inline PhysicalMemoryMetadata metadata() const noexcept { + PhysicalMemoryMetadata __ret = (this->vtbl)->metadata(&this->container); + return __ret; + } + + inline void set_mem_map(CSliceRef _mem_map) noexcept { + (this->vtbl)->set_mem_map(&this->container, _mem_map); + + } + + inline MemoryViewBase, Context> into_phys_view() && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + 
MemoryViewBase, Context> __ret = (this->vtbl)->into_phys_view(this->container); + mem_forget(this->container); + return __ret; + } + + inline MemoryViewBase, Context> phys_view() noexcept { + MemoryViewBase, Context> __ret = (this->vtbl)->phys_view(&this->container); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const CpuStateVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline void pause() noexcept { + (this->vtbl)->pause(&this->container); + + } + + inline void resume() noexcept { + (this->vtbl)->resume(&this->container); + + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const ConnectorCpuStateVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline int32_t cpu_state(CpuStateBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->cpu_state(&this->container, ok_out); + return __ret; + } + + inline int32_t into_cpu_state(IntoCpuState, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_cpu_state(this->container, ok_out); + mem_forget(this->container); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const ProcessVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline ProcessState state() noexcept { + ProcessState __ret = (this->vtbl)->state(&this->container); + return __ret; + } + + inline int32_t set_dtb(Address dtb1, Address dtb2) noexcept { + int32_t __ret = (this->vtbl)->set_dtb(&this->container, dtb1, dtb2); + return __ret; + } + + inline int32_t module_address_list_callback(const ArchitectureIdent * target_arch, 
ModuleAddressCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_address_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_list_callback(const ArchitectureIdent * target_arch, ModuleInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_list_callback(&this->container, target_arch, callback); + return __ret; + } + + inline int32_t module_by_address(Address address, ArchitectureIdent architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_by_address(&this->container, address, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name_arch(CSliceRef name, const ArchitectureIdent * architecture, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_by_name_arch(&this->container, name, architecture, ok_out); + return __ret; + } + + inline int32_t module_by_name(CSliceRef name, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t primary_module_address(Address * ok_out) noexcept { + int32_t __ret = (this->vtbl)->primary_module_address(&this->container, ok_out); + return __ret; + } + + inline int32_t primary_module(ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->primary_module(&this->container, ok_out); + return __ret; + } + + inline int32_t module_import_list_callback(const ModuleInfo * info, ImportCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_import_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_export_list_callback(const ModuleInfo * info, ExportCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_export_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_section_list_callback(const ModuleInfo * info, SectionCallback callback) noexcept { + int32_t __ret = 
(this->vtbl)->module_section_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_import_by_name(const ModuleInfo * info, CSliceRef name, ImportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_import_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_export_by_name(const ModuleInfo * info, CSliceRef name, ExportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_export_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_section_by_name(const ModuleInfo * info, CSliceRef name, SectionInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_section_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline const ProcessInfo * info() const noexcept { + const ProcessInfo * __ret = (this->vtbl)->info(&this->container); + return __ret; + } + + inline void mapped_mem_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl)->mapped_mem_range(&this->container, gap_size, start, end, out); + + } + + inline void mapped_mem(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl)->mapped_mem(&this->container, gap_size, out); + + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const VirtualTranslateVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline void virt_to_phys_list(CSliceRef addrs, VirtualTranslationCallback out, VirtualTranslationFailCallback out_fail) noexcept { + (this->vtbl)->virt_to_phys_list(&this->container, addrs, out, out_fail); + + } + + inline void virt_to_phys_range(Address start, Address end, VirtualTranslationCallback out) noexcept { + (this->vtbl)->virt_to_phys_range(&this->container, start, end, out); + + } + + inline void virt_translation_map_range(Address start, Address end, 
VirtualTranslationCallback out) noexcept { + (this->vtbl)->virt_translation_map_range(&this->container, start, end, out); + + } + + inline void virt_page_map_range(imem gap_size, Address start, Address end, MemoryRangeCallback out) noexcept { + (this->vtbl)->virt_page_map_range(&this->container, gap_size, start, end, out); + + } + + inline int32_t virt_to_phys(Address address, PhysicalAddress * ok_out) noexcept { + int32_t __ret = (this->vtbl)->virt_to_phys(&this->container, address, ok_out); + return __ret; + } + + inline int32_t virt_page_info(Address addr, Page * ok_out) noexcept { + int32_t __ret = (this->vtbl)->virt_page_info(&this->container, addr, ok_out); + return __ret; + } + + inline void virt_translation_map(VirtualTranslationCallback out) noexcept { + (this->vtbl)->virt_translation_map(&this->container, out); + + } + + inline COption
phys_to_virt(Address phys) noexcept { + COption
__ret = (this->vtbl)->phys_to_virt(&this->container, phys); + return __ret; + } + + inline void virt_page_map(imem gap_size, MemoryRangeCallback out) noexcept { + (this->vtbl)->virt_page_map(&this->container, gap_size, out); + + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const OsVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline int32_t process_address_list_callback(AddressCallback callback) noexcept { + int32_t __ret = (this->vtbl)->process_address_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t process_info_list_callback(ProcessInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl)->process_info_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t process_info_by_address(Address address, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_info_by_address(&this->container, address, ok_out); + return __ret; + } + + inline int32_t process_info_by_name(CSliceRef name, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_info_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t process_info_by_pid(Pid pid, ProcessInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_info_by_pid(&this->container, pid, ok_out); + return __ret; + } + + inline int32_t process_by_info(ProcessInfo info, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_by_info(&this->container, info, ok_out); + return __ret; + } + + inline int32_t into_process_by_info(ProcessInfo info, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_process_by_info(this->container, info, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t 
process_by_address(Address addr, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_by_address(&this->container, addr, ok_out); + return __ret; + } + + inline int32_t process_by_name(CSliceRef name, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t process_by_pid(Pid pid, ProcessInstance, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->process_by_pid(&this->container, pid, ok_out); + return __ret; + } + + inline int32_t into_process_by_address(Address addr, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_process_by_address(this->container, addr, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t into_process_by_name(CSliceRef name, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_process_by_name(this->container, name, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t into_process_by_pid(Pid pid, IntoProcessInstance, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_process_by_pid(this->container, pid, ok_out); + mem_forget(this->container); + return __ret; + } + + inline int32_t module_address_list_callback(AddressCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_address_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t module_list_callback(ModuleInfoCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_list_callback(&this->container, callback); + return __ret; + } + + inline int32_t module_by_address(Address address, ModuleInfo * ok_out) noexcept { + int32_t 
__ret = (this->vtbl)->module_by_address(&this->container, address, ok_out); + return __ret; + } + + inline int32_t module_by_name(CSliceRef name, ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_by_name(&this->container, name, ok_out); + return __ret; + } + + inline int32_t primary_module_address(Address * ok_out) noexcept { + int32_t __ret = (this->vtbl)->primary_module_address(&this->container, ok_out); + return __ret; + } + + inline int32_t primary_module(ModuleInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->primary_module(&this->container, ok_out); + return __ret; + } + + inline int32_t module_import_list_callback(const ModuleInfo * info, ImportCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_import_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_export_list_callback(const ModuleInfo * info, ExportCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_export_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_section_list_callback(const ModuleInfo * info, SectionCallback callback) noexcept { + int32_t __ret = (this->vtbl)->module_section_list_callback(&this->container, info, callback); + return __ret; + } + + inline int32_t module_import_by_name(const ModuleInfo * info, CSliceRef name, ImportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_import_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_export_by_name(const ModuleInfo * info, CSliceRef name, ExportInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_export_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline int32_t module_section_by_name(const ModuleInfo * info, CSliceRef name, SectionInfo * ok_out) noexcept { + int32_t __ret = (this->vtbl)->module_section_by_name(&this->container, info, name, ok_out); + return __ret; + } + + inline const OsInfo * info() 
const noexcept { + const OsInfo * __ret = (this->vtbl)->info(&this->container); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const KeyboardStateVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline bool is_down(int32_t vk) const noexcept { + bool __ret = (this->vtbl)->is_down(&this->container, vk); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const KeyboardVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline bool is_down(int32_t vk) noexcept { + bool __ret = (this->vtbl)->is_down(&this->container, vk); + return __ret; + } + + inline void set_down(int32_t vk, bool down) noexcept { + (this->vtbl)->set_down(&this->container, vk, down); + + } + + inline int32_t state(KeyboardStateBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->state(&this->container, ok_out); + return __ret; + } + +}; + +template +struct CGlueTraitObj>, C, R> { + const OsKeyboardVtbl> *vtbl; + CGlueObjContainer container; + + CGlueTraitObj() : container{} {} + + ~CGlueTraitObj() noexcept { + mem_drop(std::move(container)); + } + + typedef C Context; + + inline int32_t keyboard(KeyboardBase, Context> * ok_out) noexcept { + int32_t __ret = (this->vtbl)->keyboard(&this->container, ok_out); + return __ret; + } + + inline int32_t into_keyboard(IntoKeyboard, Context> * ok_out) && noexcept { + auto ___ctx = StoreAll()[this->container.clone_context(), StoreAll()]; + int32_t __ret = (this->vtbl)->into_keyboard(this->container, ok_out); + mem_forget(this->container); + return __ret; + } + +}; + +#endif // MEMFLOW_H diff --git a/apex_dma/memflow_lib/memflow-ffi/memflow_cpp.h b/apex_dma/memflow_lib/memflow-ffi/memflow_cpp.h deleted file mode 100644 index 65b5352..0000000 --- 
a/apex_dma/memflow_lib/memflow-ffi/memflow_cpp.h +++ /dev/null @@ -1,177 +0,0 @@ -#ifndef MEMFLOW_HLAPI_H -#define MEMFLOW_HLAPI_H - -#include "memflow.h" -#include "binddestr.h" - -#ifndef NO_STL_CONTAINERS -#include -#ifndef AUTO_STRING_SIZE -#define AUTO_STRING_SIZE 128 -#endif -#endif - -struct CConnectorInventory - : BindDestr -{ - CConnectorInventory(ConnectorInventory *inv) - : BindDestr(inv) {} - - CConnectorInventory() - : CConnectorInventory(::inventory_scan()) {} - - CConnectorInventory(const char *path) - : CConnectorInventory(::inventory_scan_path(path)) {} - - WRAP_FN(inventory, add_dir); - WRAP_FN(inventory, create_connector); -}; - -struct CPhysicalMemory - : BindDestr -{ - CPhysicalMemory(PhysicalMemoryObj *mem) - : BindDestr(mem) {} - - WRAP_FN_RAW(phys_read_raw_list); - WRAP_FN_RAW(phys_write_raw_list); - WRAP_FN_RAW(phys_metadata); - WRAP_FN_RAW(phys_read_raw_into); - WRAP_FN_RAW(phys_read_u32); - WRAP_FN_RAW(phys_read_u64); - WRAP_FN_RAW(phys_write_raw); - WRAP_FN_RAW(phys_write_u32); - WRAP_FN_RAW(phys_write_u64); - - template - T phys_read(PhysicalAddress address) { - T data; - this->phys_read_raw_into(address, (uint8_t *)&data, sizeof(T)); - return data; - } - - template - int32_t phys_write(PhysicalAddress address, const T &data) { - return this->phys_write_raw(address, (const uint8_t *)&data, sizeof(T)); - } -}; - -struct CCloneablePhysicalMemory - : BindDestr -{ - CCloneablePhysicalMemory(CloneablePhysicalMemoryObj *mem) - : BindDestr(mem) {} - - WRAP_FN(connector, clone); - WRAP_FN_RAW_TYPE(CPhysicalMemory, downcast_cloneable); -}; - -struct CVirtualMemory - : BindDestr -{ - CVirtualMemory(VirtualMemoryObj *virt_mem) - : BindDestr(virt_mem) {} - - WRAP_FN_RAW(virt_read_raw_list); - WRAP_FN_RAW(virt_write_raw_list); - WRAP_FN_RAW(virt_read_raw_into); - WRAP_FN_RAW(virt_read_u32); - WRAP_FN_RAW(virt_read_u64); - WRAP_FN_RAW(virt_write_raw); - WRAP_FN_RAW(virt_write_u32); - WRAP_FN_RAW(virt_write_u64); - - template - T virt_read(Address 
address) { - T data; - this->virt_read_raw_into(address, (uint8_t *)&data, sizeof(T)); - return data; - } - - template - int32_t virt_write(Address address, const T &data) { - return this->virt_write_raw(address, (const uint8_t *)&data, sizeof(T)); - } -}; - -struct CArchitecture - : BindDestr -{ - CArchitecture(ArchitectureObj *arch) - : BindDestr(arch) {} - - WRAP_FN(arch, bits); - WRAP_FN(arch, endianess); - WRAP_FN(arch, page_size); - WRAP_FN(arch, size_addr); - WRAP_FN(arch, address_space_bits); - WRAP_FN_RAW(is_x86_arch); -}; - -struct COsProcessInfo - : BindDestr -{ - COsProcessInfo(OsProcessInfoObj *info) - : BindDestr(info) {} - - WRAP_FN(os_process_info, address); - WRAP_FN(os_process_info, pid); - WRAP_FN(os_process_info, name); - WRAP_FN_TYPE(CArchitecture, os_process_info, sys_arch); - WRAP_FN_TYPE(CArchitecture, os_process_info, proc_arch); - -#ifndef NO_STL_CONTAINERS - std::string name_string(size_t max_size) { - char *buf = (char *)malloc(max_size); - if (buf) { - this->name(buf, max_size); - std::string ret = std::string(buf); - free(buf); - return ret; - } else { - return std::string(); - } - } - - std::string name_string() { - char buf[AUTO_STRING_SIZE]; - size_t ret = this->name(buf, AUTO_STRING_SIZE); - return std::string(buf); - } -#endif -}; - -struct COsProcessModuleInfo - : BindDestr -{ - COsProcessModuleInfo(OsProcessModuleInfoObj *modinfo) - : BindDestr(modinfo) {} - - WRAP_FN(os_process_module, address); - WRAP_FN(os_process_module, parent_process); - WRAP_FN(os_process_module, base); - WRAP_FN(os_process_module, size); - WRAP_FN(os_process_module, name); - -#ifndef NO_STL_CONTAINERS - std::string name_string(size_t max_size) { - char *buf = (char *)malloc(max_size); - if (buf) { - this->name(buf, max_size); - std::string ret = std::string(buf); - free(buf); - return ret; - } else { - return std::string(); - } - } - - std::string name_string() { - char buf[AUTO_STRING_SIZE]; - this->name(buf, AUTO_STRING_SIZE); - return 
std::string(buf); - } -#endif -}; - -#endif diff --git a/apex_dma/memflow_lib/memflow-ffi/memflow_go.yml b/apex_dma/memflow_lib/memflow-ffi/memflow_go.yml new file mode 100644 index 0000000..c140edf --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/memflow_go.yml @@ -0,0 +1,43 @@ +--- +GENERATOR: + PackageName: memflow_go + PackageDescription: "core components of the memflow physical memory introspection framework" + PackageLicense: "MIT" + Includes: ["memflow.h"] + FlagGroups: + - { name: "CFLAGS", traits: ["linux"], flags: [ -I../../memflow-ffi/ ] } + - { name: "LDFLAGS", traits: ["linux"], flags: [ -L../../target/release -lm -ldl -lpthread -l:libmemflow_ffi.a ] } + +PARSER: + IncludePaths: ["./"] + SourcesPaths: ["memflow.h"] + +TRANSLATOR: + ConstRules: + defines: expand + Rules: + global: + - { transform: lower } + - { action: accept, from: "^log_" } + - { action: accept, from: "^inventory_" } + - { action: accept, from: "^connector_" } + - { action: accept, from: "^phys_" } + - { action: accept, from: "^virt_" } + - { action: replace, from: "_", to: _ } + - { transform: export } + const: + - { action: replace, from: "_", to: _ } + type: + - { action: accept, from: "^MU" } + - { action: accept, from: "Inventory" } + - { action: accept, from: "^Connector" } + - { action: accept, from: "^Physical" } + - { action: accept, from: "^Virtual" } + - { action: accept, from: "^Translation" } + - { action: accept, from: "^Page" } + - { action: replace, from: "_t$" } + private: + - { transform: unexport } + post-global: + - { action: replace, from: _$ } + - { load: snakecase } \ No newline at end of file diff --git a/apex_dma/memflow_lib/memflow-ffi/src/connectors/mod.rs b/apex_dma/memflow_lib/memflow-ffi/src/connectors/mod.rs deleted file mode 100644 index 5d56888..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/src/connectors/mod.rs +++ /dev/null @@ -1,150 +0,0 @@ -use std::ffi::CStr; -use std::os::raw::c_char; -use std::path::PathBuf; - -use 
memflow::connector::{ConnectorArgs, ConnectorInventory}; - -use crate::util::*; - -use crate::mem::phys_mem::CloneablePhysicalMemoryObj; - -use log::trace; - -/// Create a new connector inventory -/// -/// This function will try to find connectors using PATH environment variable -/// -/// Note that all functions go through each directories, and look for a `memflow` directory, -/// and search for libraries in those. -/// -/// # Safety -/// -/// ConnectorInventory is inherently unsafe, because it loads shared libraries which can not be -/// guaranteed to be safe. -#[no_mangle] -pub unsafe extern "C" fn inventory_scan() -> &'static mut ConnectorInventory { - to_heap(ConnectorInventory::scan()) -} - -/// Create a new inventory with custom path string -/// -/// # Safety -/// -/// `path` must be a valid null terminated string -#[no_mangle] -pub unsafe extern "C" fn inventory_scan_path( - path: *const c_char, -) -> Option<&'static mut ConnectorInventory> { - let rpath = CStr::from_ptr(path).to_string_lossy(); - ConnectorInventory::scan_path(rpath.to_string()) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -/// Add a directory to an existing inventory -/// -/// # Safety -/// -/// `dir` must be a valid null terminated string -#[no_mangle] -pub unsafe extern "C" fn inventory_add_dir( - inv: &mut ConnectorInventory, - dir: *const c_char, -) -> i32 { - let rdir = CStr::from_ptr(dir).to_string_lossy(); - - inv.add_dir(PathBuf::from(rdir.to_string())) - .int_result_logged() -} - -/// Create a connector with given arguments -/// -/// This creates an instance of a `CloneablePhysicalMemory`. To use it for physical memory -/// operations, please call `downcast_cloneable` to create a instance of `PhysicalMemory`. -/// -/// Regardless, this instance needs to be freed using `connector_free`. 
-/// -/// # Arguments -/// -/// * `name` - name of the connector to use -/// * `args` - arguments to be passed to the connector upon its creation -/// -/// # Safety -/// -/// Both `name`, and `args` must be valid null terminated strings. -/// -/// Any error strings returned by the connector must not be outputed after the connector gets -/// freed, because that operation could cause the underlying shared library to get unloaded. -#[no_mangle] -pub unsafe extern "C" fn inventory_create_connector( - inv: &mut ConnectorInventory, - name: *const c_char, - args: *const c_char, -) -> Option<&'static mut CloneablePhysicalMemoryObj> { - let rname = CStr::from_ptr(name).to_string_lossy(); - - if args.is_null() { - inv.create_connector_default(&rname) - .map_err(inspect_err) - .ok() - .map(to_heap) - .map(|c| c as CloneablePhysicalMemoryObj) - .map(to_heap) - } else { - let rargs = CStr::from_ptr(args).to_string_lossy(); - let conn_args = ConnectorArgs::parse(&rargs).map_err(inspect_err).ok()?; - - inv.create_connector(&rname, &conn_args) - .map_err(inspect_err) - .ok() - .map(to_heap) - .map(|c| c as CloneablePhysicalMemoryObj) - .map(to_heap) - } -} - -/// Clone a connector -/// -/// This method is useful when needing to perform multithreaded operations, as a connector is not -/// guaranteed to be thread safe. Every single cloned instance also needs to be freed using -/// `connector_free`. -/// -/// # Safety -/// -/// `conn` has to point to a a valid `CloneablePhysicalMemory` created by one of the provided -/// functions. -#[no_mangle] -pub unsafe extern "C" fn connector_clone( - conn: &CloneablePhysicalMemoryObj, -) -> &'static mut CloneablePhysicalMemoryObj { - trace!("connector_clone: {:?}", conn as *const _); - Box::leak(Box::new(Box::leak(conn.clone_box()))) -} - -/// Free a connector instance -/// -/// # Safety -/// -/// `conn` has to point to a valid `CloneablePhysicalMemoryObj` created by one of the provided -/// functions. 
-/// -/// There has to be no instance of `PhysicalMemory` created from the input `conn`, because they -/// will become invalid. -#[no_mangle] -pub unsafe extern "C" fn connector_free(conn: &'static mut CloneablePhysicalMemoryObj) { - trace!("connector_free: {:?}", conn as *mut _); - let _ = Box::from_raw(*Box::from_raw(conn)); -} - -/// Free a connector inventory -/// -/// # Safety -/// -/// `inv` must point to a valid `ConnectorInventory` that was created using one of the provided -/// functions. -#[no_mangle] -pub unsafe extern "C" fn inventory_free(inv: &'static mut ConnectorInventory) { - trace!("inventory_free: {:?}", inv as *mut _); - let _ = Box::from_raw(inv); -} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/lib.rs b/apex_dma/memflow_lib/memflow-ffi/src/lib.rs index 05f4d9d..3f80503 100644 --- a/apex_dma/memflow_lib/memflow-ffi/src/lib.rs +++ b/apex_dma/memflow_lib/memflow-ffi/src/lib.rs @@ -2,12 +2,12 @@ pub mod log; pub mod types; -pub mod connectors; +pub mod plugins; pub mod mem; -pub mod architecture; +pub mod os; -pub mod process; +pub mod architecture; pub mod util; diff --git a/apex_dma/memflow_lib/memflow-ffi/src/log.rs b/apex_dma/memflow_lib/memflow-ffi/src/log.rs index 64b5905..6610ebe 100644 --- a/apex_dma/memflow_lib/memflow-ffi/src/log.rs +++ b/apex_dma/memflow_lib/memflow-ffi/src/log.rs @@ -1,17 +1,125 @@ -use log::Level; - -#[no_mangle] -pub extern "C" fn log_init(level_num: i32) { - let level = match level_num { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); +use log::{Level, LevelFilter}; +use memflow::cglue::IntError; +use memflow::error::Error; +use memflow::plugins::Inventory; +use std::num::NonZeroI32; + +use std::ffi::CStr; +use std::os::raw::c_char; + +/// Initialize logging with selected logging level. 
+#[no_mangle] +pub extern "C" fn log_init(level_filter: LevelFilter) { + simplelog::TermLogger::init( + level_filter, + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); +} + +// TODO: add variadic functions when this is being stabilized, see https://github.com/rust-lang/rust/issues/44930 + +/// Logs a error message via log::error! +/// +/// # Safety +/// +/// The provided string must be a valid null-terminated char array. +#[no_mangle] +pub unsafe extern "C" fn log_error(s: *const c_char) { + if !s.is_null() { + let c_str = CStr::from_ptr(s); + if let Ok(r_str) = c_str.to_str() { + log::error!("{}", r_str); + } + } +} + +/// Logs a warning message via log::warn! +/// +/// # Safety +/// +/// The provided string must be a valid null-terminated char array. +#[no_mangle] +pub unsafe extern "C" fn log_warn(s: *const c_char) { + if !s.is_null() { + let c_str = CStr::from_ptr(s); + if let Ok(r_str) = c_str.to_str() { + log::warn!("{}", r_str); + } + } +} + +/// Logs a info message via log::info! +/// +/// # Safety +/// +/// The provided string must be a valid null-terminated char array. +#[no_mangle] +pub unsafe extern "C" fn log_info(s: *const c_char) { + if !s.is_null() { + let c_str = CStr::from_ptr(s); + if let Ok(r_str) = c_str.to_str() { + log::info!("{}", r_str); + } + } +} + +/// Logs a debug message via log::debug! +/// +/// # Safety +/// +/// The provided string must be a valid null-terminated char array. +#[no_mangle] +pub unsafe extern "C" fn log_debug(s: *const c_char) { + if !s.is_null() { + let c_str = CStr::from_ptr(s); + if let Ok(r_str) = c_str.to_str() { + log::debug!("{}", r_str); + } + } +} + +/// Logs a trace message via log::trace! +/// +/// # Safety +/// +/// The provided string must be a valid null-terminated char array. 
+#[no_mangle] +pub unsafe extern "C" fn log_trace(s: *const c_char) { + if !s.is_null() { + let c_str = CStr::from_ptr(s); + if let Ok(r_str) = c_str.to_str() { + log::trace!("{}", r_str); + } + } +} + +/// Logs an error code with custom log level. +#[no_mangle] +pub extern "C" fn log_errorcode(level: Level, error: i32) { + if let Some(error) = NonZeroI32::new(error) { + log::log!(level, "{}", ::from_int_err(error)); + } +} + +/// Logs an error with debug log level. +#[no_mangle] +pub extern "C" fn log_debug_errorcode(error: i32) { + log_errorcode(Level::Debug, error) +} + +/// Sets new maximum log level. +/// +/// If `inventory` is supplied, the log level is also updated within all plugin instances. However, +/// if it is not supplied, plugins will not have their log levels updated, potentially leading to +/// lower performance, or less logging than expected. +#[no_mangle] +pub extern "C" fn log_set_max_level(level_filter: LevelFilter, inventory: Option<&Inventory>) { + if let Some(inventory) = inventory { + inventory.set_max_log_level(level_filter); + } else { + log::set_max_level(level_filter); + } } diff --git a/apex_dma/memflow_lib/memflow-ffi/src/mem/mod.rs b/apex_dma/memflow_lib/memflow-ffi/src/mem/mod.rs index 90264d3..b12e352 100644 --- a/apex_dma/memflow_lib/memflow-ffi/src/mem/mod.rs +++ b/apex_dma/memflow_lib/memflow-ffi/src/mem/mod.rs @@ -1,2 +1,4 @@ -pub mod phys_mem; -pub mod virt_mem; +#[allow(unused)] +pub use memflow::mem::phys_mem::*; +#[allow(unused)] +pub use memflow::mem::virt_mem::*; diff --git a/apex_dma/memflow_lib/memflow-ffi/src/mem/phys_mem.rs b/apex_dma/memflow_lib/memflow-ffi/src/mem/phys_mem.rs deleted file mode 100644 index f3705ce..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/src/mem/phys_mem.rs +++ /dev/null @@ -1,146 +0,0 @@ -use memflow::mem::phys_mem::*; -use memflow::types::PhysicalAddress; - -use crate::util::*; - -use std::slice::{from_raw_parts, from_raw_parts_mut}; - -use log::trace; - -pub type CloneablePhysicalMemoryObj 
= &'static mut dyn CloneablePhysicalMemory; -pub type PhysicalMemoryObj = &'static mut dyn PhysicalMemory; - -/// Downcast a cloneable physical memory into a physical memory object. -/// -/// This function will take a `cloneable` and turn it into a `PhysicalMemoryObj`, which then can be -/// used by physical memory functions. -/// -/// Please note that this does not free `cloneable`, and the reference is still valid for further -/// operations. -#[no_mangle] -pub extern "C" fn downcast_cloneable( - cloneable: &'static mut CloneablePhysicalMemoryObj, -) -> &'static mut PhysicalMemoryObj { - Box::leak(Box::new((*cloneable).downcast())) -} - -/// Free a `PhysicalMemoryObj` -/// -/// This will free a reference to a `PhysicalMemoryObj`. If the physical memory object was created -/// using `downcast_cloneable`, this will NOT free the cloneable reference. -/// -/// # Safety -/// -/// `mem` must point to a valid `PhysicalMemoryObj` that was created using one of the provided -/// functions. -#[no_mangle] -pub unsafe extern "C" fn phys_free(mem: &'static mut PhysicalMemoryObj) { - trace!("phys_free: {:?}", mem as *mut _); - let _ = Box::from_raw(mem); -} - -/// Read a list of values -/// -/// This will perform `len` physical memory reads on the provided `data`. Using lists is preferable -/// for performance, because then the underlying connectors can batch those operations. -/// -/// # Safety -/// -/// `data` must be a valid array of `PhysicalReadData` with the length of at least `len` -#[no_mangle] -pub unsafe extern "C" fn phys_read_raw_list( - mem: &mut PhysicalMemoryObj, - data: *mut PhysicalReadData, - len: usize, -) -> i32 { - let data = from_raw_parts_mut(data, len); - mem.phys_read_raw_list(data).int_result() -} - -/// Write a list of values -/// -/// This will perform `len` physical memory writes on the provided `data`. Using lists is preferable -/// for performance, because then the underlying connectors can batch those operations. 
-/// -/// # Safety -/// -/// `data` must be a valid array of `PhysicalWriteData` with the length of at least `len` -#[no_mangle] -pub unsafe extern "C" fn phys_write_raw_list( - mem: &mut PhysicalMemoryObj, - data: *const PhysicalWriteData, - len: usize, -) -> i32 { - let data = from_raw_parts(data, len); - mem.phys_write_raw_list(data).int_result() -} - -/// Retrieve metadata about the physical memory object -#[no_mangle] -pub extern "C" fn phys_metadata(mem: &PhysicalMemoryObj) -> PhysicalMemoryMetadata { - mem.metadata() -} - -/// Read a single value into `out` from a provided `PhysicalAddress` -/// -/// # Safety -/// -/// `out` must be a valid pointer to a data buffer of at least `len` size. -#[no_mangle] -pub unsafe extern "C" fn phys_read_raw_into( - mem: &mut PhysicalMemoryObj, - addr: PhysicalAddress, - out: *mut u8, - len: usize, -) -> i32 { - mem.phys_read_raw_into(addr, from_raw_parts_mut(out, len)) - .int_result() -} - -/// Read a single 32-bit value from a provided `PhysicalAddress` -#[no_mangle] -pub extern "C" fn phys_read_u32(mem: &mut PhysicalMemoryObj, addr: PhysicalAddress) -> u32 { - mem.phys_read::(addr).unwrap_or_default() -} - -/// Read a single 64-bit value from a provided `PhysicalAddress` -#[no_mangle] -pub extern "C" fn phys_read_u64(mem: &mut PhysicalMemoryObj, addr: PhysicalAddress) -> u64 { - mem.phys_read::(addr).unwrap_or_default() -} - -/// Write a single value from `input` into a provided `PhysicalAddress` -/// -/// # Safety -/// -/// `input` must be a valid pointer to a data buffer of at least `len` size. 
-#[no_mangle] -pub unsafe extern "C" fn phys_write_raw( - mem: &mut PhysicalMemoryObj, - addr: PhysicalAddress, - input: *const u8, - len: usize, -) -> i32 { - mem.phys_write_raw(addr, from_raw_parts(input, len)) - .int_result() -} - -/// Write a single 32-bit value into a provided `PhysicalAddress` -#[no_mangle] -pub extern "C" fn phys_write_u32( - mem: &mut PhysicalMemoryObj, - addr: PhysicalAddress, - val: u32, -) -> i32 { - mem.phys_write(addr, &val).int_result() -} - -/// Write a single 64-bit value into a provided `PhysicalAddress` -#[no_mangle] -pub extern "C" fn phys_write_u64( - mem: &mut PhysicalMemoryObj, - addr: PhysicalAddress, - val: u64, -) -> i32 { - mem.phys_write(addr, &val).int_result() -} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/mem/virt_mem.rs b/apex_dma/memflow_lib/memflow-ffi/src/mem/virt_mem.rs deleted file mode 100644 index 117b991..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/src/mem/virt_mem.rs +++ /dev/null @@ -1,117 +0,0 @@ -use memflow::error::PartialResultExt; -use memflow::mem::virt_mem::*; -use memflow::types::Address; - -use crate::util::*; - -use std::slice::{from_raw_parts, from_raw_parts_mut}; - -pub type VirtualMemoryObj = &'static mut dyn VirtualMemory; - -/// Free a virtual memory object reference -/// -/// This function frees the reference to a virtual memory object. -/// -/// # Safety -/// -/// `mem` must be a valid reference to a virtual memory object. -#[no_mangle] -pub unsafe extern "C" fn virt_free(mem: &'static mut VirtualMemoryObj) { - let _ = Box::from_raw(mem); -} - -/// Read a list of values -/// -/// This will perform `len` virtual memory reads on the provided `data`. Using lists is preferable -/// for performance, because then the underlying connectors can batch those operations, and virtual -/// translation function can cut down on read operations. 
-/// -/// # Safety -/// -/// `data` must be a valid array of `VirtualReadData` with the length of at least `len` -#[no_mangle] -pub unsafe extern "C" fn virt_read_raw_list( - mem: &mut VirtualMemoryObj, - data: *mut VirtualReadData, - len: usize, -) -> i32 { - let data = from_raw_parts_mut(data, len); - mem.virt_read_raw_list(data).data_part().int_result() -} - -/// Write a list of values -/// -/// This will perform `len` virtual memory writes on the provided `data`. Using lists is preferable -/// for performance, because then the underlying connectors can batch those operations, and virtual -/// translation function can cut down on read operations. -/// -/// # Safety -/// -/// `data` must be a valid array of `VirtualWriteData` with the length of at least `len` -#[no_mangle] -pub unsafe extern "C" fn virt_write_raw_list( - mem: &mut VirtualMemoryObj, - data: *const VirtualWriteData, - len: usize, -) -> i32 { - let data = from_raw_parts(data, len); - mem.virt_write_raw_list(data).data_part().int_result() -} - -/// Read a single value into `out` from a provided `Address` -/// -/// # Safety -/// -/// `out` must be a valid pointer to a data buffer of at least `len` size. 
-#[no_mangle] -pub unsafe extern "C" fn virt_read_raw_into( - mem: &mut VirtualMemoryObj, - addr: Address, - out: *mut u8, - len: usize, -) -> i32 { - mem.virt_read_raw_into(addr, from_raw_parts_mut(out, len)) - .data_part() - .int_result() -} - -/// Read a single 32-bit value from a provided `Address` -#[no_mangle] -pub extern "C" fn virt_read_u32(mem: &mut VirtualMemoryObj, addr: Address) -> u32 { - mem.virt_read::(addr).unwrap_or_default() -} - -/// Read a single 64-bit value from a provided `Address` -#[no_mangle] -pub extern "C" fn virt_read_u64(mem: &mut VirtualMemoryObj, addr: Address) -> u64 { - mem.virt_read::(addr).unwrap_or_default() -} - -/// Write a single value from `input` into a provided `Address` -/// -/// # Safety -/// -/// `input` must be a valid pointer to a data buffer of at least `len` size. -#[no_mangle] -pub unsafe extern "C" fn virt_write_raw( - mem: &mut VirtualMemoryObj, - addr: Address, - input: *const u8, - len: usize, -) -> i32 { - mem.virt_write_raw(addr, from_raw_parts(input, len)) - .data_part() - .int_result() -} - -/// Write a single 32-bit value into a provided `Address` -#[no_mangle] -pub extern "C" fn virt_write_u32(mem: &mut VirtualMemoryObj, addr: Address, val: u32) -> i32 { - mem.virt_write(addr, &val).data_part().int_result() -} - -/// Write a single 64-bit value into a provided `Address` -#[no_mangle] -pub extern "C" fn virt_write_u64(mem: &mut VirtualMemoryObj, addr: Address, val: u64) -> i32 { - mem.virt_write(addr, &val).data_part().int_result() -} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/os/mod.rs b/apex_dma/memflow_lib/memflow-ffi/src/os/mod.rs new file mode 100644 index 0000000..abc7a73 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/src/os/mod.rs @@ -0,0 +1,4 @@ +#[allow(unused)] +pub use memflow::os::*; +#[allow(unused)] +pub use memflow::plugins::*; diff --git a/apex_dma/memflow_lib/memflow-ffi/src/plugins/mod.rs b/apex_dma/memflow_lib/memflow-ffi/src/plugins/mod.rs new file mode 100644 index 
0000000..48e31ee --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/src/plugins/mod.rs @@ -0,0 +1,215 @@ +use std::ffi::CStr; +use std::os::raw::c_char; +use std::path::PathBuf; + +use memflow::plugins::Inventory; +use memflow::plugins::{ + connector::{ConnectorInstanceArcBox, MuConnectorInstanceArcBox}, + os::{MuOsInstanceArcBox, OsInstanceArcBox}, +}; + +use crate::util::*; +use memflow::cglue::result::IntResult; + +use log::trace; + +/// Create a new connector inventory +/// +/// This function will try to find connectors using PATH environment variable +/// +/// Note that all functions go through each directories, and look for a `memflow` directory, +/// and search for libraries in those. +/// +/// # Safety +/// +/// Inventory is inherently unsafe, because it loads shared libraries which can not be +/// guaranteed to be safe. +#[no_mangle] +pub unsafe extern "C" fn inventory_scan() -> &'static mut Inventory { + to_heap(Inventory::scan()) +} + +/// Create a new inventory with custom path string +/// +/// # Safety +/// +/// `path` must be a valid null terminated string +#[no_mangle] +pub unsafe extern "C" fn inventory_scan_path( + path: *const c_char, +) -> Option<&'static mut Inventory> { + let rpath = CStr::from_ptr(path).to_string_lossy(); + Inventory::scan_path(rpath.to_string()) + .map_err(inspect_err) + .ok() + .map(to_heap) +} + +/// Add a directory to an existing inventory +/// +/// # Safety +/// +/// `dir` must be a valid null terminated string +#[no_mangle] +pub unsafe extern "C" fn inventory_add_dir(inv: &mut Inventory, dir: *const c_char) -> i32 { + let rdir = CStr::from_ptr(dir).to_string_lossy(); + + inv.add_dir(PathBuf::from(rdir.to_string())) + .into_int_result() +} + +/// Create a connector with given arguments +/// +/// This creates an instance of `ConnectorInstance`. +/// +/// This instance needs to be dropped using `connector_drop`. 
+/// +/// # Arguments +/// +/// * `name` - name of the connector to use +/// * `args` - arguments to be passed to the connector upon its creation +/// +/// # Safety +/// +/// Both `name`, and `args` must be valid null terminated strings. +/// +/// Any error strings returned by the connector must not be outputed after the connector gets +/// freed, because that operation could cause the underlying shared library to get unloaded. +#[no_mangle] +pub unsafe extern "C" fn inventory_create_connector( + inv: &mut Inventory, + name: *const c_char, + args: *const c_char, + out: &mut MuConnectorInstanceArcBox<'static>, +) -> i32 { + let rname = CStr::from_ptr(name).to_string_lossy(); + + if args.is_null() { + inv.create_connector(&rname, None, None) + .map_err(inspect_err) + .into_int_out_result(out) + } else { + let rargs = CStr::from_ptr(args).to_string_lossy(); + str::parse(&rargs) + .map_err(inspect_err) + .and_then(|args| inv.create_connector(&rname, None, Some(&args))) + .map_err(inspect_err) + .into_int_out_result(out) + } +} + +/// Create a OS instance with given arguments +/// +/// This creates an instance of `KernelInstance`. +/// +/// This instance needs to be freed using `os_drop`. +/// +/// # Arguments +/// +/// * `name` - name of the OS to use +/// * `args` - arguments to be passed to the connector upon its creation +/// * `mem` - a previously initialized connector instance +/// * `out` - a valid memory location that will contain the resulting os-instance +/// +/// # Remarks +/// +/// The `mem` connector instance is being _moved_ into the os layer. +/// This means upon calling `os_drop` it is not unnecessary to call `connector_drop` anymore. +/// +/// # Safety +/// +/// Both `name`, and `args` must be valid null terminated strings. +/// +/// Any error strings returned by the connector must not be outputed after the connector gets +/// freed, because that operation could cause the underlying shared library to get unloaded. 
+#[no_mangle] +pub unsafe extern "C" fn inventory_create_os( + inv: &mut Inventory, + name: *const c_char, + args: *const c_char, + mem: *mut ConnectorInstanceArcBox<'static>, + out: &mut MuOsInstanceArcBox<'static>, +) -> i32 { + let rname = CStr::from_ptr(name).to_string_lossy(); + let _args = CStr::from_ptr(args).to_string_lossy(); + + let mem_obj = if mem.is_null() { + None + } else { + let mem_obj = mem.read(); + // Zero out the data so that any automatic destructors on the other side do nothing. + std::ptr::write_bytes(mem, 0, 1); + Some(mem_obj) + }; + + if args.is_null() { + inv.create_os(&rname, mem_obj, None) + .map_err(inspect_err) + .into_int_out_result(out) + } else { + let rargs = CStr::from_ptr(args).to_string_lossy(); + str::parse(&rargs) + .map_err(inspect_err) + .and_then(|args| inv.create_os(&rname, mem_obj, Some(&args))) + .map_err(inspect_err) + .into_int_out_result(out) + } +} + +/// Free a os plugin +/// +/// # Safety +/// +/// `os` must point to a valid `OsInstance` that was created using one of the provided +/// functions. +#[no_mangle] +pub unsafe extern "C" fn os_drop(os: &mut OsInstanceArcBox<'static>) { + trace!("connector_drop: {:?}", os as *mut _); + std::ptr::drop_in_place(os); +} + +/// Clone a connector +/// +/// This method is useful when needing to perform multithreaded operations, as a connector is not +/// guaranteed to be thread safe. Every single cloned instance also needs to be dropped using +/// `connector_drop`. +/// +/// # Safety +/// +/// `conn` has to point to a a valid `CloneablePhysicalMemory` created by one of the provided +/// functions. 
+#[no_mangle] +pub unsafe extern "C" fn connector_clone( + conn: &ConnectorInstanceArcBox<'static>, + out: &mut MuConnectorInstanceArcBox<'static>, +) { + trace!("connector_clone: {:?}", conn as *const _); + *out.as_mut_ptr() = conn.clone(); +} + +/// Free a connector instance +/// +/// # Safety +/// +/// `conn` has to point to a valid [`ConnectorInstance`](ConnectorInstanceArcBox) created by one of the provided +/// functions. +/// +/// There has to be no instance of `PhysicalMemory` created from the input `conn`, because they +/// will become invalid. +#[no_mangle] +pub unsafe extern "C" fn connector_drop(conn: &mut ConnectorInstanceArcBox<'static>) { + trace!("connector_drop: {:?}", conn as *mut _); + std::ptr::drop_in_place(conn) +} + +/// Free a connector inventory +/// +/// # Safety +/// +/// `inv` must point to a valid `Inventory` that was created using one of the provided +/// functions. +#[no_mangle] +pub unsafe extern "C" fn inventory_free(inv: &'static mut Inventory) { + trace!("inventory_free: {:?}", inv as *mut _); + let _ = Box::from_raw(inv); +} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/process.rs b/apex_dma/memflow_lib/memflow-ffi/src/process.rs deleted file mode 100644 index b3228f0..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/src/process.rs +++ /dev/null @@ -1,119 +0,0 @@ -use crate::util::*; -use memflow::process::*; -use std::os::raw::c_char; -use std::slice::from_raw_parts_mut; - -use memflow::architecture::ArchitectureObj; -use memflow::types::Address; - -pub type OsProcessInfoObj = &'static dyn OsProcessInfo; - -#[no_mangle] -pub extern "C" fn os_process_info_address(obj: &OsProcessInfoObj) -> Address { - obj.address() -} - -#[no_mangle] -pub extern "C" fn os_process_info_pid(obj: &OsProcessInfoObj) -> PID { - obj.pid() -} - -/// Retreive name of the process -/// -/// This will copy at most `max_len` characters (including the null terminator) into `out` of the -/// name. 
-/// -/// # Safety -/// -/// `out` must be a buffer with at least `max_len` size -#[no_mangle] -pub unsafe extern "C" fn os_process_info_name( - obj: &OsProcessInfoObj, - out: *mut c_char, - max_len: usize, -) -> usize { - let name = obj.name(); - let name_bytes = name.as_bytes(); - let out_bytes = from_raw_parts_mut(out as *mut u8, std::cmp::min(max_len, name.len() + 1)); - let len = out_bytes.len(); - out_bytes[..(len - 1)].copy_from_slice(&name_bytes[..(len - 1)]); - *out_bytes.iter_mut().last().unwrap() = 0; - len -} - -#[no_mangle] -pub extern "C" fn os_process_info_sys_arch(obj: &OsProcessInfoObj) -> &ArchitectureObj { - to_heap(obj.sys_arch()) -} - -#[no_mangle] -pub extern "C" fn os_process_info_proc_arch(obj: &OsProcessInfoObj) -> &ArchitectureObj { - to_heap(obj.proc_arch()) -} - -/// Free a OsProcessInfoObj reference -/// -/// # Safety -/// -/// `obj` must point to a valid `OsProcessInfoObj`, and was created using one of the API's -/// functions. -#[no_mangle] -pub unsafe extern "C" fn os_process_info_free(obj: &'static mut OsProcessInfoObj) { - let _ = Box::from_raw(obj); -} - -pub type OsProcessModuleInfoObj = &'static dyn OsProcessModuleInfo; - -#[no_mangle] -pub extern "C" fn os_process_module_address(obj: &OsProcessModuleInfoObj) -> Address { - obj.address() -} - -#[no_mangle] -pub extern "C" fn os_process_module_parent_process(obj: &OsProcessModuleInfoObj) -> Address { - obj.parent_process() -} - -#[no_mangle] -pub extern "C" fn os_process_module_base(obj: &OsProcessModuleInfoObj) -> Address { - obj.base() -} - -#[no_mangle] -pub extern "C" fn os_process_module_size(obj: &OsProcessModuleInfoObj) -> usize { - obj.size() -} - -/// Retreive name of the module -/// -/// This will copy at most `max_len` characters (including the null terminator) into `out` of the -/// name. 
-/// -/// # Safety -/// -/// `out` must be a buffer with at least `max_len` size -#[no_mangle] -pub unsafe extern "C" fn os_process_module_name( - obj: &OsProcessModuleInfoObj, - out: *mut c_char, - max_len: usize, -) -> usize { - let name = obj.name(); - let name_bytes = name.as_bytes(); - let out_bytes = from_raw_parts_mut(out as *mut u8, std::cmp::min(max_len, name.len() + 1)); - let len = out_bytes.len(); - out_bytes[..(len - 1)].copy_from_slice(&name_bytes[..(len - 1)]); - *out_bytes.iter_mut().last().unwrap() = 0; - len -} - -/// Free a OsProcessModuleInfoObj reference -/// -/// # Safety -/// -/// `obj` must point to a valid `OsProcessModuleInfoObj`, and was created using one of the API's -/// functions. -#[no_mangle] -pub unsafe extern "C" fn os_process_module_free(obj: &'static mut OsProcessModuleInfoObj) { - let _ = Box::from_raw(obj); -} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/util.rs b/apex_dma/memflow_lib/memflow-ffi/src/util.rs index 6070fd6..fc34b9e 100644 --- a/apex_dma/memflow_lib/memflow-ffi/src/util.rs +++ b/apex_dma/memflow_lib/memflow-ffi/src/util.rs @@ -8,37 +8,3 @@ pub fn inspect_err(e: E) -> E { pub fn to_heap(a: T) -> &'static mut T { Box::leak(Box::new(a)) } - -pub trait ToIntResult { - fn int_result(self) -> i32; - - fn int_result_logged(self) -> i32 - where - Self: Sized, - { - let res = self.int_result(); - if res != 0 { - error!("err value: {}", res); - } - res - } -} - -impl ToIntResult for Result { - fn int_result(self) -> i32 { - if self.is_ok() { - 0 - } else { - -1 - } - } - - fn int_result_logged(self) -> i32 { - if let Err(e) = self { - error!("{}", e); - -1 - } else { - 0 - } - } -} diff --git a/apex_dma/memflow_lib/memflow-ffi/src/win32.rs b/apex_dma/memflow_lib/memflow-ffi/src/win32.rs deleted file mode 100644 index 06ebbc0..0000000 --- a/apex_dma/memflow_lib/memflow-ffi/src/win32.rs +++ /dev/null @@ -1,65 +0,0 @@ -/* -use std::ffi::c_void; -use std::ptr; - -use memflow_win32::*; -*/ - -/* -/// # Safety -/// -/// 
this function has to be called with an initialized memory backend -/// this function will return a pointer to a win32 object that has to be freed via win32_free() -#[no_mangle] -pub unsafe extern "C" fn win32_init(mem: *mut c_void) -> *mut Win32 { - if !mem.is_null() { - let mut _mem: Box> = std::mem::transmute(mem as *mut _); - - let _os = Win32::try_with(&mut **_mem).unwrap(); - - Box::leak(_mem); - return std::mem::transmute(Box::new(_os)); - } - - ptr::null_mut() -} - -/// # Safety -/// -/// this function has to be called with a pointer that has been initialized from win32_init() -#[no_mangle] -pub unsafe extern "C" fn win32_free(win32: *mut Win32) { - if !win32.is_null() { - let _win32: Box = std::mem::transmute(win32); - // drop _win32 - } -} - -/// # Safety -/// -/// this function will return a pointer to a win32_offsets object that has to be freed via win32_offsets_free() -#[no_mangle] -pub unsafe extern "C" fn win32_offsets_init(win32: *mut Win32) -> *mut Win32Offsets { - if !win32.is_null() { - let _win32: Box = std::mem::transmute(win32); - - let _offsets = Win32Offsets::try_with_guid(&_win32.kernel_guid()).unwrap(); - - Box::leak(_win32); - return std::mem::transmute(Box::new(_offsets)); - } - - ptr::null_mut() -} - -/// # Safety -/// -/// this function has to be called with a pointer that has been initialized from win32_offsets_init() -#[no_mangle] -pub unsafe extern "C" fn win32_offsets_free(offsets: *mut Win32Offsets) { - if !offsets.is_null() { - let _offsets: Box = std::mem::transmute(offsets); - // drop _offsets - } -} -*/ diff --git a/apex_dma/memflow_lib/memflow-ffi/verify_headers.sh b/apex_dma/memflow_lib/memflow-ffi/verify_headers.sh new file mode 100644 index 0000000..c7914ca --- /dev/null +++ b/apex_dma/memflow_lib/memflow-ffi/verify_headers.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# update cglue-bindgen +cargo +nightly install cbindgen +cargo +nightly install cglue-bindgen + +DIFFC=$(diff memflow.h <(rustup run nightly cglue-bindgen 
+nightly -c cglue.toml -- --config cbindgen.toml --crate memflow-ffi -l C)) +DIFFCPP=$(diff memflow.hpp <(rustup run nightly cglue-bindgen +nightly -c cglue.toml -- --config cbindgen.toml --crate memflow-ffi -l C++)) +if [ "$DIFFC" != "" ] || [ "$DIFFCPP" != "" ] +then + exit 1 +fi diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/.gitignore b/apex_dma/memflow_lib/memflow-qemu-procfs/.gitignore deleted file mode 100644 index 3582899..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -/target -**/*.rs.bk -*.swp -.vscode -Cargo.lock diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/Cargo.toml b/apex_dma/memflow_lib/memflow-qemu-procfs/Cargo.toml deleted file mode 100644 index c35f691..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "memflow-qemu-procfs" -version = "0.1.5" -authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] -edition = "2018" -description = "qemu procfs connector for the memflow physical memory introspection framework" -documentation = "https://docs.rs/memflow-qemu-procfs" -readme = "README.md" -homepage = "https://memflow.github.io" -repository = "https://github.com/memflow/memflow-qemu-procfs" -license-file = "LICENSE" -keywords = [ "memflow", "introspection", "memory" ] -categories = [ "api-bindings", "memory-management", "os" ] - -[lib] -crate-type = ["lib", "cdylib"] - -[dependencies] -memflow = { version = "0.1", features = ["inventory"] } -log = { version = "0.4", default-features = false } -procfs = "0.7" -libc = "0.2" - -[dev-dependencies] -clap = "2.33" -simple_logger = "1.0" - -[profile.release] -lto = true - -[features] -default = [] -inventory = [] - -[[example]] -name = "read_phys" -path = "examples/read_phys.rs" diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/Makefile b/apex_dma/memflow_lib/memflow-qemu-procfs/Makefile deleted file mode 100644 index 75e3cd5..0000000 --- 
a/apex_dma/memflow_lib/memflow-qemu-procfs/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -.PHONY: all release debug test install - -all: - make test - make release - -release: - cargo build --release --all-features - -debug: - cargo build --all-features - -clean: - cargo clean - -test: - cargo test --all-features - -install_user: - ./install.sh - -install: - ./install.sh --system - diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/README.md b/apex_dma/memflow_lib/memflow-qemu-procfs/README.md deleted file mode 100644 index 58571de..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# memflow-qemu-procfs - -This connector implements an interface for Qemu via the Process Filesystem on Linux. - -## Compilation - -### Installing the library - -The `./install.sh` script will just compile and install the plugin. -The connector will be installed to `~/.local/lib/memflow` by default. -Additionally the `--system` flag can be specified which will install the connector in `/usr/lib/memflow` as well. - -### Building the stand-alone connector for dynamic loading - -The stand-alone connector of this library is feature-gated behind the `inventory` feature. -To compile a dynamic library for use with the connector inventory use the following command: - -``` -cargo build --release --all-features -``` - -### Using the crate in a rust project - -To use the connector in a rust project just include it in your Cargo.toml - -``` -memflow-qemu-procfs = "0.1" -``` - -Make sure to not enable the `inventory` feature when importing multiple -connectors in a rust project without using the memflow connector inventory. -This might cause duplicated exports being generated in your project. - -## Arguments - -- `name` - the name of the virtual machine (default argument, optional) - -## Permissions - -The `qemu_procfs` connector requires access to the qemu process via the linux procfs. 
This means any process which loads this connector requires to have at least ptrace permissions set. - -To set ptrace permissions on a binary simply use: -```bash -sudo setcap 'CAP_SYS_PTRACE=ep' [filename] -``` - -Alternatively you can just run the binary via `sudo`. - -## License - -Licensed under MIT License, see [LICENSE](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, shall be licensed as above, without any additional terms or conditions. diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/examples/read_phys.rs b/apex_dma/memflow_lib/memflow-qemu-procfs/examples/read_phys.rs deleted file mode 100644 index 9e82e03..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/examples/read_phys.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::time::Instant; - -use log::{info, Level}; - -use memflow::prelude::v1::*; - -fn main() { - simple_logger::SimpleLogger::new() - .with_level(Level::Debug.to_level_filter()) - .init() - .unwrap(); - - let mut conn = match memflow_qemu_procfs::create_connector(&ConnectorArgs::new()) { - Ok(br) => br, - Err(e) => { - info!("couldn't open memory read context: {:?}", e); - return; - } - }; - - let metadata = conn.metadata(); - info!("Received metadata: {:?}", metadata); - - let mut mem = vec![0; 8]; - conn.phys_read_raw_into(Address::from(0x1000).into(), &mut mem) - .unwrap(); - info!("Received memory: {:?}", mem); - - let start = Instant::now(); - let mut counter = 0; - loop { - let mut buf = vec![0; 0x1000]; - conn.phys_read_raw_into(Address::from(0x1000).into(), &mut buf) - .unwrap(); - - counter += 1; - if (counter % 10000000) == 0 { - let elapsed = start.elapsed().as_millis() as f64; - if elapsed > 0.0 { - info!("{} reads/sec", (f64::from(counter)) / elapsed * 1000.0); - info!("{} ms/read", elapsed / (f64::from(counter))); - } - } - } -} diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/install.sh 
b/apex_dma/memflow_lib/memflow-qemu-procfs/install.sh deleted file mode 100644 index f5a751c..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/install.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -cargo build --release --all-features - -# install connector to system dir -if [ ! -z "$1" ] && [ $1 = "--system" ]; then - echo "installing connector system-wide in /usr/lib/memflow" - if [[ ! -d /usr/lib/memflow ]]; then - sudo mkdir /usr/lib/memflow - fi - sudo cp target/release/libmemflow_qemu_procfs.so /usr/lib/memflow -fi - -# install connector in user dir -echo "installing connector for user in ~/.local/lib/memflow" -if [[ ! -d ~/.local/lib/memflow ]]; then - mkdir -p ~/.local/lib/memflow -fi -cp target/release/libmemflow_qemu_procfs.so ~/.local/lib/memflow diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/src/lib.rs b/apex_dma/memflow_lib/memflow-qemu-procfs/src/lib.rs deleted file mode 100644 index baa1a0d..0000000 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/src/lib.rs +++ /dev/null @@ -1,475 +0,0 @@ -use log::info; - -use core::ffi::c_void; -use libc::{c_ulong, iovec, pid_t, sysconf, _SC_IOV_MAX}; - -use memflow::prelude::v1::*; - -#[derive(Clone, Copy)] -#[repr(transparent)] -struct IoSendVec(iovec); - -unsafe impl Send for IoSendVec {} - -fn qemu_arg_opt(args: &[String], argname: &str, argopt: &str) -> Option { - for (idx, arg) in args.iter().enumerate() { - if arg == argname { - let name = args[idx + 1].split(','); - for (i, kv) in name.clone().enumerate() { - let kvsplt = kv.split('=').collect::>(); - if kvsplt.len() == 2 { - if kvsplt[0] == argopt { - return Some(kvsplt[1].to_string()); - } - } else if i == 0 { - return Some(kv.to_string()); - } - } - } - } - - None -} - -fn is_qemu(process: &procfs::process::Process) -> bool { - process - .cmdline() - .ok() - .and_then(|cmdline| { - cmdline.iter().nth(0).and_then(|cmd| { - std::path::Path::new(cmd) - .file_name() - .and_then(|exe| exe.to_str()) - .map(|v| v.contains("qemu-system-")) - }) - 
}) - .unwrap_or(false) -} - -#[derive(Clone)] -pub struct QemuProcfs { - pub pid: pid_t, - pub mem_map: MemoryMap<(Address, usize)>, - temp_iov: Box<[IoSendVec]>, -} - -impl QemuProcfs { - pub fn new() -> Result { - let prcs = procfs::process::all_processes() - .map_err(|_| Error::Connector("unable to list procfs processes"))?; - let prc = prcs - .iter() - .find(|p| is_qemu(p)) - .ok_or_else(|| Error::Connector("qemu process not found"))?; - info!("qemu process found with pid {:?}", prc.stat.pid); - - Self::with_process(prc) - } - - pub fn with_guest_name(name: &str) -> Result { - let prcs = procfs::process::all_processes() - .map_err(|_| Error::Connector("unable to list procefs processes"))?; - let (prc, _) = prcs - .iter() - .filter(|p| is_qemu(p)) - .filter_map(|p| { - if let Ok(c) = p.cmdline() { - Some((p, c)) - } else { - None - } - }) - .find(|(_, c)| qemu_arg_opt(c, "-name", "guest").unwrap_or_default() == name) - .ok_or_else(|| Error::Connector("qemu process not found"))?; - info!( - "qemu process with name {} found with pid {:?}", - name, prc.stat.pid - ); - - Self::with_process(prc) - } - - fn with_process(prc: &procfs::process::Process) -> Result { - // find biggest memory mapping in qemu process - let mut maps = prc - .maps() - .map_err(|_| Error::Connector("Unable to retrieve Qemu memory maps. 
Did u run memflow with the correct access rights (SYS_PTRACE or root)?"))?; - maps.sort_by(|b, a| { - (a.address.1 - a.address.0) - .partial_cmp(&(b.address.1 - b.address.0)) - .unwrap() - }); - let map = maps - .get(0) - .ok_or_else(|| Error::Connector("Qemu memory map could not be read"))?; - info!("qemu memory map found {:?}", map); - - let map_base = map.address.0 as usize; - let map_size = (map.address.1 - map.address.0) as usize; - info!("qemu memory map size: {:x}", map_size); - - // TODO: instead of hardcoding the memory regions per machine we could just use the hmp to retrieve the proper ranges: - // sudo virsh qemu-monitor-command win10 --hmp 'info mtree -f' | grep pc\.ram - - // find machine architecture - let machine = qemu_arg_opt( - &prc.cmdline() - .map_err(|_| Error::Connector("Unable to parse qemu arguments"))?, - "-machine", - "type", - ) - .unwrap_or_else(|| "pc".into()); - info!("qemu process started with machine: {}", machine); - - let mut mem_map = MemoryMap::new(); - if machine.contains("q35") { - // q35 -> subtract 2GB - /* - 0000000000000000-000000000009ffff (prio 0, ram): pc.ram KVM - 00000000000c0000-00000000000c3fff (prio 0, rom): pc.ram @00000000000c0000 KVM - 0000000000100000-000000007fffffff (prio 0, ram): pc.ram @0000000000100000 KVM - 0000000100000000-000000047fffffff (prio 0, ram): pc.ram @0000000080000000 KVM - */ - // we add all regions additionally shifted to the proper qemu memory map address - mem_map.push_range(Address::NULL, size::kb(640).into(), map_base.into()); // section: [start - 640kb] -> map to start - // If larger than this specific size, second half after 2 gigs gets moved over past 4gb - // TODO: Probably the same happens with i1440-fx - if map_size >= size::mb(2816) { - mem_map.push_range( - size::mb(1).into(), - size::gb(2).into(), - (map_base + size::mb(1)).into(), - ); // section: [1mb - 2gb] -> map to 1mb - mem_map.push_range( - size::gb(4).into(), - (map_size + size::gb(2)).into(), - (map_base + 
size::gb(2)).into(), - ); // section: [4gb - max] -> map to 2gb - } else { - mem_map.push_range( - size::mb(1).into(), - map_size.into(), - (map_base + size::mb(1)).into(), - ); // section: [1mb - max] -> map to 1mb - } - } else { - // pc-i1440fx - /* - 0000000000000000-00000000000bffff (prio 0, ram): pc.ram KVM - 00000000000c0000-00000000000cafff (prio 0, rom): pc.ram @00000000000c0000 KVM - 00000000000cb000-00000000000cdfff (prio 0, ram): pc.ram @00000000000cb000 KVM - 00000000000ce000-00000000000e7fff (prio 0, rom): pc.ram @00000000000ce000 KVM - 00000000000e8000-00000000000effff (prio 0, ram): pc.ram @00000000000e8000 KVM - 00000000000f0000-00000000000fffff (prio 0, rom): pc.ram @00000000000f0000 KVM - 0000000000100000-00000000bfffffff (prio 0, ram): pc.ram @0000000000100000 KVM - 0000000100000000-000000023fffffff (prio 0, ram): pc.ram @00000000c0000000 KVM - */ - mem_map.push_range(Address::NULL, size::kb(768).into(), map_base.into()); // section: [start - 768kb] -> map to start - mem_map.push_range( - size::kb(812).into(), - size::kb(824).into(), - (map_base + size::kb(812)).into(), - ); // section: [768kb - 812kb] -> map to 768kb - mem_map.push_range( - size::kb(928).into(), - size::kb(960).into(), - (map_base + size::kb(928)).into(), - ); // section: [928kb - 960kb] -> map to 928kb - mem_map.push_range( - size::mb(1).into(), - size::gb(3).into(), - (map_base + size::mb(1)).into(), - ); // section: [1mb - 3gb] -> map to 1mb - mem_map.push_range( - size::gb(4).into(), - (map_size + size::gb(1)).into(), - (map_base + size::gb(3)).into(), - ); // section: [4gb - max] -> map to 3gb - } - info!("qemu machine mem_map: {:?}", mem_map); - - let iov_max = unsafe { sysconf(_SC_IOV_MAX) } as usize; - - Ok(Self { - pid: prc.stat.pid, - mem_map, - temp_iov: vec![ - IoSendVec { - 0: iovec { - iov_base: std::ptr::null_mut::(), - iov_len: 0 - } - }; - iov_max * 2 - ] - .into_boxed_slice(), - }) - } - - fn fill_iovec(addr: &Address, data: &[u8], liov: &mut IoSendVec, riov: 
&mut IoSendVec) { - let iov_len = data.len(); - - liov.0 = iovec { - iov_base: data.as_ptr() as *mut c_void, - iov_len, - }; - - riov.0 = iovec { - iov_base: addr.as_u64() as *mut c_void, - iov_len, - }; - } - - fn vm_error() -> Error { - match unsafe { *libc::__errno_location() } { - libc::EFAULT => Error::Connector("process_vm_readv failed: EFAULT (remote memory address is invalid)"), - libc::ENOMEM => Error::Connector("process_vm_readv failed: ENOMEM (unable to allocate memory for internal copies)"), - libc::EPERM => Error::Connector("process_vm_readv failed: EPERM (insifficient permissions to access the target address space)"), - libc::ESRCH => Error::Connector("process_vm_readv failed: ESRCH (process not found)"), - libc::EINVAL => Error::Connector("process_vm_readv failed: EINVAL (invalid value)"), - _ => Error::Connector("process_vm_readv failed: unknown error") - } - } -} - -impl PhysicalMemory for QemuProcfs { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - let mem_map = &self.mem_map; - let temp_iov = &mut self.temp_iov; - - let mut void = FnExtend::void(); - let mut iter = mem_map.map_iter( - data.iter_mut() - .map(|PhysicalReadData(addr, buf)| (*addr, &mut **buf)), - &mut void, - ); - - let max_iov = temp_iov.len() / 2; - let (iov_local, iov_remote) = temp_iov.split_at_mut(max_iov); - - let mut elem = iter.next(); - - let mut iov_iter = iov_local.iter_mut().zip(iov_remote.iter_mut()).enumerate(); - let mut iov_next = iov_iter.next(); - - while let Some(((addr, _), out)) = elem { - let (cnt, (liov, riov)) = iov_next.unwrap(); - - Self::fill_iovec(&addr, out, liov, riov); - - iov_next = iov_iter.next(); - elem = iter.next(); - - if elem.is_none() || iov_next.is_none() { - if unsafe { - libc::process_vm_readv( - self.pid, - iov_local.as_ptr().cast(), - (cnt + 1) as c_ulong, - iov_remote.as_ptr().cast(), - (cnt + 1) as c_ulong, - 0, - ) - } == -1 - { - return Err(Self::vm_error()); - } - - iov_iter = 
iov_local.iter_mut().zip(iov_remote.iter_mut()).enumerate(); - iov_next = iov_iter.next(); - } - } - - Ok(()) - } - - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - let mem_map = &self.mem_map; - let temp_iov = &mut self.temp_iov; - - let mut void = FnExtend::void(); - let mut iter = mem_map.map_iter(data.iter().copied().map(<_>::from), &mut void); - //let mut iter = mem_map.map_iter(data.iter(), &mut FnExtend::new(|_|{})); - - let max_iov = temp_iov.len() / 2; - let (iov_local, iov_remote) = temp_iov.split_at_mut(max_iov); - - let mut elem = iter.next(); - - let mut iov_iter = iov_local.iter_mut().zip(iov_remote.iter_mut()).enumerate(); - let mut iov_next = iov_iter.next(); - - while let Some(((addr, _), out)) = elem { - let (cnt, (liov, riov)) = iov_next.unwrap(); - - Self::fill_iovec(&addr, out, liov, riov); - - iov_next = iov_iter.next(); - elem = iter.next(); - - if elem.is_none() || iov_next.is_none() { - if unsafe { - libc::process_vm_writev( - self.pid, - iov_local.as_ptr().cast(), - (cnt + 1) as c_ulong, - iov_remote.as_ptr().cast(), - (cnt + 1) as c_ulong, - 0, - ) - } == -1 - { - return Err(Self::vm_error()); - } - - iov_iter = iov_local.iter_mut().zip(iov_remote.iter_mut()).enumerate(); - iov_next = iov_iter.next(); - } - } - - Ok(()) - } - - fn metadata(&self) -> PhysicalMemoryMetadata { - PhysicalMemoryMetadata { - size: self - .mem_map - .as_ref() - .iter() - .last() - .map(|map| map.base().as_usize() + map.output().1) - .unwrap(), - readonly: false, - } - } -} - -/// Creates a new Qemu Procfs Connector instance. 
-#[connector(name = "qemu_procfs")] -pub fn create_connector(args: &ConnectorArgs) -> Result { - if let Some(name) = args.get("name").or_else(|| args.get_default()) { - QemuProcfs::with_guest_name(name) - } else { - QemuProcfs::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_name() { - assert_eq!( - qemu_arg_opt( - &["-name".to_string(), "win10-test".to_string()], - "-name", - "guest" - ), - Some("win10-test".into()) - ); - assert_eq!( - qemu_arg_opt( - &[ - "-test".to_string(), - "-name".to_string(), - "win10-test".to_string() - ], - "-name", - "guest" - ), - Some("win10-test".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-name".to_string(), "win10-test,arg=opt".to_string()], - "-name", - "guest" - ), - Some("win10-test".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-name".to_string(), "guest=win10-test,arg=opt".to_string()], - "-name", - "guest" - ), - Some("win10-test".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-name".to_string(), "arg=opt,guest=win10-test".to_string()], - "-name", - "guest" - ), - Some("win10-test".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-name".to_string(), "arg=opt".to_string()], - "-name", - "guest" - ), - None - ); - } - - #[test] - fn test_machine() { - assert_eq!( - qemu_arg_opt( - &["-machine".to_string(), "q35".to_string()], - "-machine", - "type" - ), - Some("q35".into()) - ); - assert_eq!( - qemu_arg_opt( - &[ - "-test".to_string(), - "-machine".to_string(), - "q35".to_string() - ], - "-machine", - "type" - ), - Some("q35".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-machine".to_string(), "q35,arg=opt".to_string()], - "-machine", - "type" - ), - Some("q35".into()) - ); - assert_eq!( - qemu_arg_opt( - &["-machine".to_string(), "type=pc,arg=opt".to_string()], - "-machine", - "type" - ), - Some("pc".into()) - ); - assert_eq!( - qemu_arg_opt( - &[ - "-machine".to_string(), - "arg=opt,type=pc-i1440fx".to_string() - ], - "-machine", - "type" - ), - Some("pc-i1440fx".into()) - ); - 
assert_eq!( - qemu_arg_opt( - &["-machine".to_string(), "arg=opt".to_string()], - "-machine", - "type" - ), - None - ); - } -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/.gitignore b/apex_dma/memflow_lib/memflow-win32-ffi/.gitignore deleted file mode 100644 index 50a5bd4..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -/target -**/*.rs.bk -bindings -**/node_modules -**/*.out -**/*.o diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/Cargo.toml b/apex_dma/memflow_lib/memflow-win32-ffi/Cargo.toml deleted file mode 100644 index 68cd1a6..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "memflow-win32-ffi" -version = "0.1.5" -authors = ["Aurimas Blažulionis <0x60@pm.me>"] -edition = "2018" -description = "C bindings to memflow-win32" -documentation = "https://docs.rs/memflow-win32-ffi" -readme = "README.md" -homepage = "https://memflow.github.io" -repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" -keywords = [ "memflow", "introspection", "memory", "dma" ] -categories = [ "api-bindings", "memory-management", "os" ] - -[badges] -maintenance = { status = "actively-developed" } -codecov = { repository = "github", branch = "master", service = "github" } - -[lib] -name = "memflow_win32_ffi" -crate-type = ["lib", "cdylib", "staticlib"] - -[dependencies] -memflow-win32 = { version = "0.1", path = "../memflow-win32" } -memflow = { version = "0.1", path = "../memflow" } -memflow-ffi = { version = "0.1", path = "../memflow-ffi" } diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/README.md b/apex_dma/memflow_lib/memflow-win32-ffi/README.md deleted file mode 100644 index 13bb90d..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# memflow-win32-ffi -[![Crates.io](https://img.shields.io/crates/v/memflow.svg)](https://crates.io/crates/memflow) -![build and 
test](https://github.com/memflow/memflow/workflows/Build%20and%20test/badge.svg?branch=dev) -[![codecov](https://codecov.io/gh/memflow/memflow/branch/master/graph/badge.svg?token=XT7R158N6W)](https://codecov.io/gh/memflow/memflow) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) -[![Discord](https://img.shields.io/discord/738739624976973835?color=%20%237289da&label=Discord)](https://discord.gg/afsEtMR) - -The [memflow](https://github.com/memflow/memflow) win32 FFI crate provides an interface to the memflow-win32 API for C/C++. Currently a single `memflow_win32.h` file is generated aside from the dynamic library that can be used to interact with memflow. - -This FFI library is intended to be used in combination with the [memflow-ffi](https://github.com/memflow/memflow/memflow-ffi) library. - -A simple example that initializes the memflow-ffi and memflow-win32-ffi: -```cpp -#include "memflow_win32.h" -#include - -int main(int argc, char *argv[]) { - log_init(1); - - ConnectorInventory *inv = inventory_try_new(); - printf("inv: %p\n", inv); - - const char *conn_name = argc > 1? argv[1]: "kvm"; - const char *conn_arg = argc > 2? argv[2]: ""; - - CloneablePhysicalMemoryObj *conn = - inventory_create_connector(inv, conn_name, conn_arg); - printf("conn: %p\n", conn); - - if (conn) { - Kernel *kernel = kernel_build(conn); - printf("Kernel: %p\n", kernel); - Win32Version ver = kernel_winver(kernel); - printf("major: %d\n", ver.nt_major_version); - printf("minor: %d\n", ver.nt_minor_version); - printf("build: %d\n", ver.nt_build_number); - - kernel_free(kernel); - } - - inventory_free(inv); - - return 0; -} -``` - -Additional examples can be found in the `examples` folder as well as in the [memflow-ffi](https://github.com/memflow/memflow/memflow-ffi) crate. 
diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/bindgen.sh b/apex_dma/memflow_lib/memflow-win32-ffi/bindgen.sh deleted file mode 100644 index 337d60b..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/bindgen.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -cargo build --release --workspace -cbindgen --config cbindgen.toml --crate memflow-win32-ffi --output memflow_win32.h diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/cbindgen.toml b/apex_dma/memflow_lib/memflow-win32-ffi/cbindgen.toml deleted file mode 100644 index 5a8b6d5..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/cbindgen.toml +++ /dev/null @@ -1,22 +0,0 @@ -language = "C" - -include_guard = "MEMFLOW_WIN32_H" -tab_width = 4 -documentation_style = "doxy" -style = "both" -cpp_compat = true -includes = ["memflow.h"] - -[parse] -parse_deps = true - -include = ["memflow-win32", "memflow"] - -[macro_expansion] -bitflags = true - -[fn] -sort_by = "None" - -[export] -exclude = ["PageType", "Address"] diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/examples/Makefile b/apex_dma/memflow_lib/memflow-win32-ffi/examples/Makefile deleted file mode 100644 index 8b76722..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/examples/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -CC =gcc -CFLAGS =-I../ -I../../memflow-ffi/ -L../../target/release -LIBS=-lm -ldl -lpthread -l:libmemflow_win32_ffi.a - -ODIR=./ - -%.o: %.c $(DEPS) - $(CC) -c -o $@ $< $(CFLAGS) - -process_list.out: process_list.o - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) - -dump_header.out: dump_header.o - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) - -.PHONY: all -all: process_list.out dump_header.out - -.DEFAULT_GOAL := all - -clean: - rm -f $(ODIR)/*.o diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/examples/dump_header.c b/apex_dma/memflow_lib/memflow-win32-ffi/examples/dump_header.c deleted file mode 100644 index f8fd1ba..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/examples/dump_header.c +++ /dev/null @@ -1,61 +0,0 @@ -#include "memflow_win32.h" 
-#include - -int main(int argc, char *argv[]) { - log_init(1); - - ConnectorInventory *inv = inventory_try_new(); - printf("inv: %p\n", inv); - - const char *conn_name = argc > 1? argv[1]: "kvm"; - const char *conn_arg = argc > 2? argv[2]: ""; - - const char *proc_name = argc > 3? argv[3]: "lsass.exe"; - const char *dll_name = argc > 4? argv[4]: "ntdll.dll"; - - CloneablePhysicalMemoryObj *conn = inventory_create_connector(inv, conn_name, conn_arg); - printf("conn: %p\n", conn); - - if (conn) { - Kernel *kernel = kernel_build(conn); - printf("Kernel: %p\n", kernel); - Win32Version ver = kernel_winver(kernel); - printf("major: %d\n", ver.nt_major_version); - printf("minor: %d\n", ver.nt_minor_version); - printf("build: %d\n", ver.nt_build_number); - - Win32Process *process = kernel_into_process(kernel, proc_name); - - if (process) { - Win32ModuleInfo *module = process_module_info(process, dll_name); - - if (module) { - OsProcessModuleInfoObj *obj = module_info_trait(module); - Address base = os_process_module_base(obj); - os_process_module_free(obj); - VirtualMemoryObj *virt_mem = process_virt_mem(process); - - char header[256]; - if (!virt_read_raw_into(virt_mem, base, header, 256)) { - printf("Read successful!\n"); - for (int o = 0; o < 8; o++) { - for (int i = 0; i < 32; i++) { - printf("%2hhx ", header[o * 32 + i]); - } - printf("\n"); - } - } else { - printf("Failed to read!\n"); - } - - virt_free(virt_mem); - } - - process_free(process); - } - } - - inventory_free(inv); - - return 0; -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/examples/process_list.c b/apex_dma/memflow_lib/memflow-win32-ffi/examples/process_list.c deleted file mode 100644 index aa3b4a4..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/examples/process_list.c +++ /dev/null @@ -1,54 +0,0 @@ -#include "memflow_win32.h" -#include - -int main(int argc, char *argv[]) { - log_init(1); - - ConnectorInventory *inv = inventory_scan(); - printf("inv: %p\n", inv); - - const char *conn_name 
= argc > 1? argv[1]: "kvm"; - const char *conn_arg = argc > 2? argv[2]: ""; - - CloneablePhysicalMemoryObj *conn = inventory_create_connector(inv, conn_name, conn_arg); - printf("conn: %p\n", conn); - - if (conn) { - Kernel *kernel = kernel_build(conn); - printf("Kernel: %p\n", kernel); - Win32Version ver = kernel_winver(kernel); - printf("major: %d\n", ver.nt_major_version); - printf("minor: %d\n", ver.nt_minor_version); - printf("build: %d\n", ver.nt_build_number); - - Win32ProcessInfo *processes[512]; - size_t process_count = kernel_process_info_list(kernel, processes, 512); - - printf("Process List:\n"); - printf("%-8s | %-16s | %-16s | %-12s | %-5s\n", "PID", "Name", "Base", "DTB", "Wow64"); - - for (size_t i = 0; i < process_count; i++) { - Win32ProcessInfo *process = processes[i]; - OsProcessInfoObj *info = process_info_trait(process); - char name[32]; - os_process_info_name(info, name, 32); - - printf("%-8d | %-16s | %-16lx | %-12lx | %-5s\n", - os_process_info_pid(info), - name, - process_info_section_base(process), - process_info_dtb(process), - process_info_wow64(process)? 
"Yes" : "No" - ); - - os_process_info_free(info); - process_info_free(process); - } - - kernel_free(kernel); - } - - inventory_free(inv); - - return 0; -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32.h b/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32.h deleted file mode 100644 index a292d9c..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32.h +++ /dev/null @@ -1,321 +0,0 @@ -#ifndef MEMFLOW_WIN32_H -#define MEMFLOW_WIN32_H - -#include -#include -#include -#include -#include "memflow.h" - -typedef struct Kernel_FFIMemory__FFIVirtualTranslate Kernel_FFIMemory__FFIVirtualTranslate; - -typedef struct Win32ModuleInfo Win32ModuleInfo; - -typedef struct Win32ProcessInfo Win32ProcessInfo; - -typedef struct Win32Process_FFIVirtualMemory Win32Process_FFIVirtualMemory; - -typedef Kernel_FFIMemory__FFIVirtualTranslate Kernel; - -typedef struct StartBlock { - Address kernel_hint; - Address dtb; -} StartBlock; - -typedef struct Win32Version { - uint32_t nt_major_version; - uint32_t nt_minor_version; - uint32_t nt_build_number; -} Win32Version; - -/** - * Type alias for a PID. - */ -typedef uint32_t PID; - -typedef Win32Process_FFIVirtualMemory Win32Process; - -typedef struct Win32ArchOffsets { - uintptr_t peb_ldr; - uintptr_t ldr_list; - uintptr_t ldr_data_base; - uintptr_t ldr_data_size; - uintptr_t ldr_data_full_name; - uintptr_t ldr_data_base_name; -} Win32ArchOffsets; - -typedef struct Win32ModuleListInfo { - Address module_base; - Win32ArchOffsets offsets; -} Win32ModuleListInfo; - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** - * Build a cloneable kernel object with default caching parameters - * - * This function will take ownership of the input `mem` object. - * - * # Safety - * - * `mem` must be a heap allocated memory reference, created by one of the API's functions. - * Reference to it becomes invalid. 
- */ -Kernel *kernel_build(CloneablePhysicalMemoryObj *mem); - -/** - * Build a cloneable kernel object with custom caching parameters - * - * This function will take ownership of the input `mem` object. - * - * vat_cache_entries must be positive, or the program will panic upon memory reads or writes. - * - * # Safety - * - * `mem` must be a heap allocated memory reference, created by one of the API's functions. - * Reference to it becomes invalid. - */ -Kernel *kernel_build_custom(CloneablePhysicalMemoryObj *mem, - uint64_t page_cache_time_ms, - PageType page_cache_flags, - uintptr_t page_cache_size_kb, - uint64_t vat_cache_time_ms, - uintptr_t vat_cache_entries); - -Kernel *kernel_clone(const Kernel *kernel); - -/** - * Free a kernel object - * - * This will free the input `kernel` object (including the underlying memory object) - * - * # Safety - * - * `kernel` must be a valid reference heap allocated by one of the above functions. - */ -void kernel_free(Kernel *kernel); - -/** - * Destroy a kernel object and return its underlying memory object - * - * This will free the input `kernel` object, and return the underlying memory object. It will free - * the object from any additional caching that `kernel` had in place. - * - * # Safety - * - * `kernel` must be a valid reference heap allocated by one of the above functions. - */ -CloneablePhysicalMemoryObj *kernel_destroy(Kernel *kernel); - -StartBlock kernel_start_block(const Kernel *kernel); - -Win32Version kernel_winver(const Kernel *kernel); - -Win32Version kernel_winver_unmasked(const Kernel *kernel); - -/** - * Retrieve a list of peorcess addresses - * - * # Safety - * - * `buffer` must be a valid buffer of size at least `max_size` - */ -uintptr_t kernel_eprocess_list(Kernel *kernel, Address *buffer, uintptr_t max_size); - -/** - * Retrieve a list of processes - * - * This will fill `buffer` with a list of win32 process information. 
These processes will need to be - * individually freed with `process_info_free` - * - * # Safety - * - * `buffer` must be a valid that can contain at least `max_size` references to `Win32ProcessInfo`. - */ -uintptr_t kernel_process_info_list(Kernel *kernel, Win32ProcessInfo **buffer, uintptr_t max_size); - -Win32ProcessInfo *kernel_kernel_process_info(Kernel *kernel); - -Win32ProcessInfo *kernel_process_info_from_eprocess(Kernel *kernel, Address eprocess); - -/** - * Retrieve process information by name - * - * # Safety - * - * `name` must be a valid null terminated string - */ -Win32ProcessInfo *kernel_process_info(Kernel *kernel, const char *name); - -Win32ProcessInfo *kernel_process_info_pid(Kernel *kernel, PID pid); - -/** - * Create a process by looking up its name - * - * This will consume `kernel` and free it later on. - * - * # Safety - * - * `name` must be a valid null terminated string - * - * `kernel` must be a valid reference to `Kernel`. After the function the reference to it becomes - * invalid. - */ -Win32Process *kernel_into_process(Kernel *kernel, const char *name); - -/** - * Create a process by looking up its PID - * - * This will consume `kernel` and free it later on. - * - * # Safety - * - * `kernel` must be a valid reference to `Kernel`. After the function the reference to it becomes - * invalid. - */ -Win32Process *kernel_into_process_pid(Kernel *kernel, PID pid); - -/** - * Create a kernel process insatance - * - * This will consume `kernel` and free it later on. - * - * # Safety - * - * `kernel` must be a valid reference to `Kernel`. After the function the reference to it becomes - * invalid. - */ -Win32Process *kernel_into_kernel_process(Kernel *kernel); - -OsProcessModuleInfoObj *module_info_trait(Win32ModuleInfo *info); - -/** - * Free a win32 module info instance. - * - * Note that it is not the same as `OsProcessModuleInfoObj`, and those references need to be freed - * manually. 
- * - * # Safety - * - * `info` must be a unique heap allocated reference to `Win32ModuleInfo`, and after this call the - * reference will become invalid. - */ -void module_info_free(Win32ModuleInfo *info); - -/** - * Create a process with kernel and process info - * - * # Safety - * - * `kernel` must be a valid heap allocated reference to a `Kernel` object. After the function - * call, the reference becomes invalid. - */ -Win32Process *process_with_kernel(Kernel *kernel, const Win32ProcessInfo *proc_info); - -/** - * Retrieve refernce to the underlying virtual memory object - * - * This will return a static reference to the virtual memory object. It will only be valid as long - * as `process` if valid, and needs to be freed manually using `virt_free` regardless if the - * process if freed or not. - */ -VirtualMemoryObj *process_virt_mem(Win32Process *process); - -Win32Process *process_clone(const Win32Process *process); - -/** - * Frees the `process` - * - * # Safety - * - * `process` must be a valid heap allocated reference to a `Win32Process` object. After the - * function returns, the reference becomes invalid. - */ -void process_free(Win32Process *process); - -/** - * Retrieve a process module list - * - * This will fill up to `max_len` elements into `out` with references to `Win32ModuleInfo` objects. - * - * These references then need to be freed with `module_info_free` - * - * # Safety - * - * `out` must be a valid buffer able to contain `max_len` references to `Win32ModuleInfo`. - */ -uintptr_t process_module_list(Win32Process *process, Win32ModuleInfo **out, uintptr_t max_len); - -/** - * Retrieve the main module of the process - * - * This function searches for a module with a base address - * matching the section_base address from the ProcessInfo structure. - * It then returns a reference to a newly allocated - * `Win32ModuleInfo` object, if a module was found (null otherwise). 
- * - * The reference later needs to be freed with `module_info_free` - * - * # Safety - * - * `process` must be a valid Win32Process pointer. - */ -Win32ModuleInfo *process_main_module_info(Win32Process *process); - -/** - * Lookup a module - * - * This will search for a module called `name`, and return a reference to a newly allocated - * `Win32ModuleInfo` object, if a module was found (null otherwise). - * - * The reference later needs to be freed with `module_info_free` - * - * # Safety - * - * `process` must be a valid Win32Process pointer. - * `name` must be a valid null terminated string. - */ -Win32ModuleInfo *process_module_info(Win32Process *process, const char *name); - -OsProcessInfoObj *process_info_trait(Win32ProcessInfo *info); - -Address process_info_dtb(const Win32ProcessInfo *info); - -Address process_info_section_base(const Win32ProcessInfo *info); - -int32_t process_info_exit_status(const Win32ProcessInfo *info); - -Address process_info_ethread(const Win32ProcessInfo *info); - -Address process_info_wow64(const Win32ProcessInfo *info); - -Address process_info_peb(const Win32ProcessInfo *info); - -Address process_info_peb_native(const Win32ProcessInfo *info); - -Address process_info_peb_wow64(const Win32ProcessInfo *info); - -Address process_info_teb(const Win32ProcessInfo *info); - -Address process_info_teb_wow64(const Win32ProcessInfo *info); - -Win32ModuleListInfo process_info_module_info(const Win32ProcessInfo *info); - -Win32ModuleListInfo process_info_module_info_native(const Win32ProcessInfo *info); - -/** - * Free a process information reference - * - * # Safety - * - * `info` must be a valid heap allocated reference to a Win32ProcessInfo structure - */ -void process_info_free(Win32ProcessInfo *info); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus - -#endif /* MEMFLOW_WIN32_H */ diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32_cpp.h b/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32_cpp.h deleted file 
mode 100644 index 961f4ef..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/memflow_win32_cpp.h +++ /dev/null @@ -1,151 +0,0 @@ -#ifndef MEMFLOW_WIN32_HLAPI_H -#define MEMFLOW_WIN32_HLAPI_H - -#include "memflow_cpp.h" -#include "memflow_win32.h" -#include "binddestr.h" - -#ifndef NO_STL_CONTAINERS -#include -// Maximum number of entries allowed in the returned lists -#ifndef AUTO_VEC_SIZE -#define AUTO_VEC_SIZE 2048 -#endif -#endif - -struct CKernel; - -struct CWin32ModuleInfo - : BindDestr -{ - CWin32ModuleInfo(Win32ModuleInfo *modinfo) - : BindDestr(modinfo) {} - - WRAP_FN_TYPE(COsProcessModuleInfo, module, info_trait); -}; - -struct CWin32Process - : BindDestr -{ - CWin32Process(Win32Process *process) - : BindDestr(process) {} - - CWin32Process(CKernel &kernel, Win32ProcessInfo *info); - - WRAP_FN_TYPE(CWin32ModuleInfo, process, module_info); - WRAP_FN_TYPE(CVirtualMemory, process, virt_mem); -}; - -struct CWin32ProcessInfo - : BindDestr -{ - CWin32ProcessInfo(Win32ProcessInfo *info) - : BindDestr(info) {} - - WRAP_FN_TYPE(COsProcessInfo, process_info, trait); - WRAP_FN(process_info, dtb); - WRAP_FN(process_info, section_base); - WRAP_FN(process_info, wow64); - WRAP_FN(process_info, peb); - WRAP_FN(process_info, peb_native); - WRAP_FN(process_info, peb_wow64); - WRAP_FN(process_info, teb); - WRAP_FN(process_info, teb_wow64); - WRAP_FN(process_info, module_info); - WRAP_FN(process_info, module_info_native); - - inline operator COsProcessInfo() { - return this->trait(); - } -}; - -struct CKernel - : BindDestr -{ - CKernel(Kernel *kernel) - : BindDestr(kernel) {} - - CKernel(CCloneablePhysicalMemory &mem) - : BindDestr(kernel_build(mem.invalidate())) {} - - CKernel( - CCloneablePhysicalMemory &mem, - uint64_t page_cache_time_ms, - PageType page_cache_flags, - uintptr_t page_cache_size_kb, - uint64_t vat_cache_time_ms, - uintptr_t vat_cache_entries - ) : BindDestr(kernel_build_custom( - mem.invalidate(), - page_cache_time_ms, - page_cache_flags, - 
page_cache_size_kb, - vat_cache_time_ms, - vat_cache_entries - )) {} - - WRAP_FN_TYPE(CKernel, kernel, clone); - WRAP_FN_TYPE_INVALIDATE(CCloneablePhysicalMemory, kernel, destroy); - WRAP_FN(kernel, start_block); - WRAP_FN(kernel, winver); - WRAP_FN(kernel, winver_unmasked); - WRAP_FN(kernel, eprocess_list); - WRAP_FN(kernel, process_info_list); - WRAP_FN_TYPE(CWin32ProcessInfo, kernel, kernel_process_info); - WRAP_FN_TYPE(CWin32ProcessInfo, kernel, process_info_from_eprocess); - WRAP_FN_TYPE(CWin32ProcessInfo, kernel, process_info); - WRAP_FN_TYPE(CWin32ProcessInfo, kernel, process_info_pid); - WRAP_FN_TYPE_INVALIDATE(CWin32Process, kernel, into_process); - WRAP_FN_TYPE_INVALIDATE(CWin32Process, kernel, into_process_pid); - WRAP_FN_TYPE_INVALIDATE(CWin32Process, kernel, into_kernel_process); - -#ifndef NO_STL_CONTAINERS - // Manual eprocess_list impl - std::vector
eprocess_vec(size_t max_size) { - Address *buf = (Address *)malloc(sizeof(Address *) * max_size); - std::vector
ret; - - if (buf) { - size_t size = kernel_eprocess_list(this->inner, buf, max_size); - - for (size_t i = 0; i < size; i++) - ret.push_back(buf[i]); - - free(buf); - } - - return ret; - } - - std::vector
eprocess_vec() { - return this->eprocess_vec(AUTO_VEC_SIZE); - } - - // Manual process_info_list impl - std::vector process_info_vec(size_t max_size) { - Win32ProcessInfo **buf = (Win32ProcessInfo **)malloc(sizeof(Win32ProcessInfo *) * max_size); - std::vector ret; - - if (buf) { - size_t size = kernel_process_info_list(this->inner, buf, max_size); - - for (size_t i = 0; i < size; i++) - ret.push_back(CWin32ProcessInfo(buf[i])); - - free(buf); - } - - return ret; - } - - std::vector process_info_vec() { - return this->process_info_vec(AUTO_VEC_SIZE); - } -#endif -}; - -// Extra constructors we couldn't define inside the classes -CWin32Process::CWin32Process(CKernel &kernel, Win32ProcessInfo *info) - : BindDestr(process_with_kernel(kernel.invalidate(), info)) {} - -#endif diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/mod.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/mod.rs deleted file mode 100644 index 0ef541c..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod start_block; diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/start_block.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/start_block.rs deleted file mode 100644 index aa20c3b..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/kernel/start_block.rs +++ /dev/null @@ -1,17 +0,0 @@ -use memflow::types::Address; -use memflow_win32::kernel; - -#[repr(C)] -pub struct StartBlock { - pub kernel_hint: Address, - pub dtb: Address, -} - -impl From for StartBlock { - fn from(o: kernel::StartBlock) -> StartBlock { - StartBlock { - kernel_hint: o.kernel_hint, - dtb: o.dtb, - } - } -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/lib.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/lib.rs deleted file mode 100644 index de4ae2e..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod kernel; -pub mod win32; diff --git 
a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/kernel.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/kernel.rs deleted file mode 100644 index cdd3ad1..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/kernel.rs +++ /dev/null @@ -1,331 +0,0 @@ -use memflow_ffi::mem::phys_mem::CloneablePhysicalMemoryObj; -use memflow_ffi::util::*; -use memflow_win32::kernel::Win32Version; -use memflow_win32::win32::{kernel, Win32ProcessInfo, Win32VirtualTranslate}; - -use memflow::mem::{ - cache::{CachedMemoryAccess, CachedVirtualTranslate, TimedCacheValidator}, - CloneablePhysicalMemory, DirectTranslate, VirtualDMA, -}; - -use memflow::iter::FnExtend; -use memflow::process::PID; -use memflow::types::{size, Address, PageType}; - -use super::process::Win32Process; -use crate::kernel::start_block::StartBlock; - -use std::ffi::CStr; -use std::os::raw::c_char; -use std::time::Duration; - -pub(crate) type FFIMemory = - CachedMemoryAccess<'static, Box, TimedCacheValidator>; -pub(crate) type FFIVirtualTranslate = CachedVirtualTranslate; - -pub(crate) type FFIVirtualMemory = - VirtualDMA; - -pub type Kernel = kernel::Kernel; - -/// Build a cloneable kernel object with default caching parameters -/// -/// This function will take ownership of the input `mem` object. -/// -/// # Safety -/// -/// `mem` must be a heap allocated memory reference, created by one of the API's functions. -/// Reference to it becomes invalid. -#[no_mangle] -pub unsafe extern "C" fn kernel_build( - mem: &'static mut CloneablePhysicalMemoryObj, -) -> Option<&'static mut Kernel> { - let mem: Box = Box::from_raw(*Box::from_raw(mem)); - kernel::Kernel::builder(mem) - .build_default_caches() - .build() - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -/// Build a cloneable kernel object with custom caching parameters -/// -/// This function will take ownership of the input `mem` object. -/// -/// vat_cache_entries must be positive, or the program will panic upon memory reads or writes. 
-/// -/// # Safety -/// -/// `mem` must be a heap allocated memory reference, created by one of the API's functions. -/// Reference to it becomes invalid. -#[no_mangle] -pub unsafe extern "C" fn kernel_build_custom( - mem: &'static mut CloneablePhysicalMemoryObj, - page_cache_time_ms: u64, - page_cache_flags: PageType, - page_cache_size_kb: usize, - vat_cache_time_ms: u64, - vat_cache_entries: usize, -) -> Option<&'static mut Kernel> { - let mem: Box = Box::from_raw(*Box::from_raw(mem)); - kernel::Kernel::builder(mem) - .build_page_cache(move |connector, arch| { - CachedMemoryAccess::builder(connector) - .arch(arch) - .validator(TimedCacheValidator::new( - Duration::from_millis(page_cache_time_ms).into(), - )) - .page_type_mask(page_cache_flags) - .cache_size(size::kb(page_cache_size_kb)) - .build() - .unwrap() - }) - .build_vat_cache(move |vat, arch| { - CachedVirtualTranslate::builder(vat) - .arch(arch) - .validator(TimedCacheValidator::new( - Duration::from_millis(vat_cache_time_ms).into(), - )) - .entries(vat_cache_entries) - .build() - .unwrap() - }) - .build() - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -#[no_mangle] -pub extern "C" fn kernel_clone(kernel: &Kernel) -> &'static mut Kernel { - Box::leak(Box::new((*kernel).clone())) -} - -/// Free a kernel object -/// -/// This will free the input `kernel` object (including the underlying memory object) -/// -/// # Safety -/// -/// `kernel` must be a valid reference heap allocated by one of the above functions. -#[no_mangle] -pub unsafe extern "C" fn kernel_free(kernel: &'static mut Kernel) { - let _ = Box::from_raw(kernel); -} - -/// Destroy a kernel object and return its underlying memory object -/// -/// This will free the input `kernel` object, and return the underlying memory object. It will free -/// the object from any additional caching that `kernel` had in place. -/// -/// # Safety -/// -/// `kernel` must be a valid reference heap allocated by one of the above functions. 
-#[no_mangle] -pub unsafe extern "C" fn kernel_destroy( - kernel: &'static mut Kernel, -) -> &'static mut CloneablePhysicalMemoryObj { - let kernel = Box::from_raw(kernel); - Box::leak(Box::new(Box::leak(kernel.destroy().destroy()))) -} - -#[no_mangle] -pub extern "C" fn kernel_start_block(kernel: &Kernel) -> StartBlock { - kernel.kernel_info.start_block.into() -} - -#[no_mangle] -pub extern "C" fn kernel_winver(kernel: &Kernel) -> Win32Version { - kernel.kernel_info.kernel_winver.mask_build_number() -} - -#[no_mangle] -pub extern "C" fn kernel_winver_unmasked(kernel: &Kernel) -> Win32Version { - kernel.kernel_info.kernel_winver -} - -/// Retrieve a list of peorcess addresses -/// -/// # Safety -/// -/// `buffer` must be a valid buffer of size at least `max_size` -#[no_mangle] -pub unsafe extern "C" fn kernel_eprocess_list( - kernel: &'static mut Kernel, - buffer: *mut Address, - max_size: usize, -) -> usize { - let mut ret = 0; - - let buffer = std::slice::from_raw_parts_mut(buffer, max_size); - - let mut extend_fn = FnExtend::new(|addr| { - if ret < max_size { - buffer[ret] = addr; - ret += 1; - } - }); - - kernel - .eprocess_list_extend(&mut extend_fn) - .map_err(inspect_err) - .ok() - .map(|_| ret) - .unwrap_or_default() -} - -/// Retrieve a list of processes -/// -/// This will fill `buffer` with a list of win32 process information. These processes will need to be -/// individually freed with `process_info_free` -/// -/// # Safety -/// -/// `buffer` must be a valid that can contain at least `max_size` references to `Win32ProcessInfo`. 
-#[no_mangle] -pub unsafe extern "C" fn kernel_process_info_list( - kernel: &'static mut Kernel, - buffer: *mut &'static mut Win32ProcessInfo, - max_size: usize, -) -> usize { - let mut ret = 0; - - let buffer = std::slice::from_raw_parts_mut(buffer, max_size); - - let mut extend_fn = FnExtend::new(|info| { - if ret < max_size { - buffer[ret] = Box::leak(Box::new(info)); - ret += 1; - } - }); - - kernel - .process_info_list_extend(&mut extend_fn) - .map_err(inspect_err) - .ok() - .map(|_| ret) - .unwrap_or_default() -} - -// Process info - -#[no_mangle] -pub extern "C" fn kernel_kernel_process_info( - kernel: &'static mut Kernel, -) -> Option<&'static mut Win32ProcessInfo> { - kernel - .kernel_process_info() - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -#[no_mangle] -pub extern "C" fn kernel_process_info_from_eprocess( - kernel: &'static mut Kernel, - eprocess: Address, -) -> Option<&'static mut Win32ProcessInfo> { - kernel - .process_info_from_eprocess(eprocess) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -/// Retrieve process information by name -/// -/// # Safety -/// -/// `name` must be a valid null terminated string -#[no_mangle] -pub unsafe extern "C" fn kernel_process_info( - kernel: &'static mut Kernel, - name: *const c_char, -) -> Option<&'static mut Win32ProcessInfo> { - let name = CStr::from_ptr(name).to_string_lossy(); - kernel - .process_info(&name) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -#[no_mangle] -pub extern "C" fn kernel_process_info_pid( - kernel: &'static mut Kernel, - pid: PID, -) -> Option<&'static mut Win32ProcessInfo> { - kernel - .process_info_pid(pid) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -// Process conversion - -/// Create a process by looking up its name -/// -/// This will consume `kernel` and free it later on. -/// -/// # Safety -/// -/// `name` must be a valid null terminated string -/// -/// `kernel` must be a valid reference to `Kernel`. 
After the function the reference to it becomes -/// invalid. -#[no_mangle] -pub unsafe extern "C" fn kernel_into_process( - kernel: &'static mut Kernel, - name: *const c_char, -) -> Option<&'static mut Win32Process> { - let kernel = Box::from_raw(kernel); - let name = CStr::from_ptr(name).to_string_lossy(); - kernel - .into_process(&name) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -/// Create a process by looking up its PID -/// -/// This will consume `kernel` and free it later on. -/// -/// # Safety -/// -/// `kernel` must be a valid reference to `Kernel`. After the function the reference to it becomes -/// invalid. -#[no_mangle] -pub unsafe extern "C" fn kernel_into_process_pid( - kernel: &'static mut Kernel, - pid: PID, -) -> Option<&'static mut Win32Process> { - let kernel = Box::from_raw(kernel); - kernel - .into_process_pid(pid) - .map_err(inspect_err) - .ok() - .map(to_heap) -} - -/// Create a kernel process insatance -/// -/// This will consume `kernel` and free it later on. -/// -/// # Safety -/// -/// `kernel` must be a valid reference to `Kernel`. After the function the reference to it becomes -/// invalid. 
-#[no_mangle] -pub unsafe extern "C" fn kernel_into_kernel_process( - kernel: &'static mut Kernel, -) -> Option<&'static mut Win32Process> { - let kernel = Box::from_raw(kernel); - kernel - .into_kernel_process() - .map_err(inspect_err) - .ok() - .map(to_heap) -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/mod.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/mod.rs deleted file mode 100644 index 8157daf..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod kernel; -pub mod module; -pub mod process; -pub mod process_info; diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/module.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/module.rs deleted file mode 100644 index 6badc87..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/module.rs +++ /dev/null @@ -1,24 +0,0 @@ -use memflow_ffi::process::OsProcessModuleInfoObj; -use memflow_ffi::util::to_heap; -use memflow_win32::win32::Win32ModuleInfo; - -#[no_mangle] -pub extern "C" fn module_info_trait( - info: &'static mut Win32ModuleInfo, -) -> &'static mut OsProcessModuleInfoObj { - to_heap(info) -} - -/// Free a win32 module info instance. -/// -/// Note that it is not the same as `OsProcessModuleInfoObj`, and those references need to be freed -/// manually. -/// -/// # Safety -/// -/// `info` must be a unique heap allocated reference to `Win32ModuleInfo`, and after this call the -/// reference will become invalid. 
-#[no_mangle] -pub unsafe extern "C" fn module_info_free(info: &'static mut Win32ModuleInfo) { - let _ = Box::from_raw(info); -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process.rs deleted file mode 100644 index 80b3436..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process.rs +++ /dev/null @@ -1,136 +0,0 @@ -use super::kernel::{FFIVirtualMemory, Kernel}; - -use memflow::iter::FnExtend; -use memflow_ffi::mem::virt_mem::VirtualMemoryObj; -use memflow_ffi::util::*; -use memflow_win32::win32::{self, Win32ModuleInfo, Win32ProcessInfo}; - -use std::ffi::CStr; -use std::os::raw::c_char; - -pub type Win32Process = win32::Win32Process; - -/// Create a process with kernel and process info -/// -/// # Safety -/// -/// `kernel` must be a valid heap allocated reference to a `Kernel` object. After the function -/// call, the reference becomes invalid. -#[no_mangle] -pub unsafe extern "C" fn process_with_kernel( - kernel: &'static mut Kernel, - proc_info: &Win32ProcessInfo, -) -> &'static mut Win32Process { - let kernel = Box::from_raw(kernel); - to_heap(Win32Process::with_kernel(*kernel, proc_info.clone())) -} - -/// Retrieve refernce to the underlying virtual memory object -/// -/// This will return a static reference to the virtual memory object. It will only be valid as long -/// as `process` if valid, and needs to be freed manually using `virt_free` regardless if the -/// process if freed or not. -#[no_mangle] -pub extern "C" fn process_virt_mem( - process: &'static mut Win32Process, -) -> &'static mut VirtualMemoryObj { - to_heap(&mut process.virt_mem) -} - -#[no_mangle] -pub extern "C" fn process_clone(process: &Win32Process) -> &'static mut Win32Process { - to_heap((*process).clone()) -} - -/// Frees the `process` -/// -/// # Safety -/// -/// `process` must be a valid heap allocated reference to a `Win32Process` object. 
After the -/// function returns, the reference becomes invalid. -#[no_mangle] -pub unsafe extern "C" fn process_free(process: &'static mut Win32Process) { - let _ = Box::from_raw(process); -} - -/// Retrieve a process module list -/// -/// This will fill up to `max_len` elements into `out` with references to `Win32ModuleInfo` objects. -/// -/// These references then need to be freed with `module_info_free` -/// -/// # Safety -/// -/// `out` must be a valid buffer able to contain `max_len` references to `Win32ModuleInfo`. -#[no_mangle] -pub unsafe extern "C" fn process_module_list( - process: &mut Win32Process, - out: *mut &'static mut Win32ModuleInfo, - max_len: usize, -) -> usize { - let mut ret = 0; - - let buffer = std::slice::from_raw_parts_mut(out, max_len); - - let mut extend_fn = FnExtend::new(|info| { - if ret < max_len { - buffer[ret] = to_heap(info); - ret += 1; - } - }); - - process - .module_list_extend(&mut extend_fn) - .map_err(inspect_err) - .ok() - .map(|_| ret) - .unwrap_or_default() -} - -/// Retrieve the main module of the process -/// -/// This function searches for a module with a base address -/// matching the section_base address from the ProcessInfo structure. -/// It then returns a reference to a newly allocated -/// `Win32ModuleInfo` object, if a module was found (null otherwise). -/// -/// The reference later needs to be freed with `module_info_free` -/// -/// # Safety -/// -/// `process` must be a valid Win32Process pointer. -#[no_mangle] -pub unsafe extern "C" fn process_main_module_info( - process: &mut Win32Process, -) -> Option<&'static mut Win32ModuleInfo> { - process - .main_module_info() - .map(to_heap) - .map_err(inspect_err) - .ok() -} - -/// Lookup a module -/// -/// This will search for a module called `name`, and return a reference to a newly allocated -/// `Win32ModuleInfo` object, if a module was found (null otherwise). 
-/// -/// The reference later needs to be freed with `module_info_free` -/// -/// # Safety -/// -/// `process` must be a valid Win32Process pointer. -/// `name` must be a valid null terminated string. -#[no_mangle] -pub unsafe extern "C" fn process_module_info( - process: &mut Win32Process, - name: *const c_char, -) -> Option<&'static mut Win32ModuleInfo> { - let name = CStr::from_ptr(name).to_string_lossy(); - - process - .module_info(&name) - .map(to_heap) - .map_err(inspect_err) - .ok() -} diff --git a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process_info.rs b/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process_info.rs deleted file mode 100644 index f1e3a7e..0000000 --- a/apex_dma/memflow_lib/memflow-win32-ffi/src/win32/process_info.rs +++ /dev/null @@ -1,81 +0,0 @@ -use memflow::types::Address; -use memflow_ffi::process::OsProcessInfoObj; -use memflow_ffi::util::to_heap; -use memflow_win32::win32::{Win32ModuleListInfo, Win32ProcessInfo}; - -#[no_mangle] -pub extern "C" fn process_info_trait( - info: &'static mut Win32ProcessInfo, -) -> &'static mut OsProcessInfoObj { - to_heap(info) -} - -#[no_mangle] -pub extern "C" fn process_info_dtb(info: &Win32ProcessInfo) -> Address { - info.dtb -} - -#[no_mangle] -pub extern "C" fn process_info_section_base(info: &Win32ProcessInfo) -> Address { - info.section_base -} - -#[no_mangle] -pub extern "C" fn process_info_exit_status(info: &Win32ProcessInfo) -> i32 { - info.exit_status -} - -#[no_mangle] -pub extern "C" fn process_info_ethread(info: &Win32ProcessInfo) -> Address { - info.ethread -} - -#[no_mangle] -pub extern "C" fn process_info_wow64(info: &Win32ProcessInfo) -> Address { - info.wow64() -} - -#[no_mangle] -pub extern "C" fn process_info_peb(info: &Win32ProcessInfo) -> Address { - info.peb() -} - -#[no_mangle] -pub extern "C" fn process_info_peb_native(info: &Win32ProcessInfo) -> Address { - info.peb_native() -} - -#[no_mangle] -pub extern "C" fn process_info_peb_wow64(info: &Win32ProcessInfo) -> 
Address { - info.peb_wow64().unwrap_or_default() -} - -#[no_mangle] -pub extern "C" fn process_info_teb(info: &Win32ProcessInfo) -> Address { - info.teb.unwrap_or_default() -} - -#[no_mangle] -pub extern "C" fn process_info_teb_wow64(info: &Win32ProcessInfo) -> Address { - info.teb_wow64.unwrap_or_default() -} - -#[no_mangle] -pub extern "C" fn process_info_module_info(info: &Win32ProcessInfo) -> Win32ModuleListInfo { - info.module_info() -} - -#[no_mangle] -pub extern "C" fn process_info_module_info_native(info: &Win32ProcessInfo) -> Win32ModuleListInfo { - info.module_info_native() -} - -/// Free a process information reference -/// -/// # Safety -/// -/// `info` must be a valid heap allocated reference to a Win32ProcessInfo structure -#[no_mangle] -pub unsafe extern "C" fn process_info_free(info: &'static mut Win32ProcessInfo) { - let _ = Box::from_raw(info); -} diff --git a/apex_dma/memflow_lib/memflow-win32/.github/workflows/binary-build.yml b/apex_dma/memflow_lib/memflow-win32/.github/workflows/binary-build.yml new file mode 100644 index 0000000..075f757 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/.github/workflows/binary-build.yml @@ -0,0 +1,43 @@ +name: Binary build + +on: + push: + branch: + - 'main' + - 'stable' + +env: + CARGO_TERM_COLOR: always + +jobs: + + cross-build: + name: Publish binary builds + runs-on: ubuntu-latest + strategy: + matrix: + target: ["x86_64-unknown-linux-gnu", "i686-unknown-linux-gnu", "aarch64-unknown-linux-gnu", "armv7-unknown-linux-gnueabihf", "x86_64-pc-windows-gnu"] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + - name: Download renamer + run: curl -sSf https://raw.githubusercontent.com/memflow/memflowup/master/target_rename.sh > target_rename.sh + - name: Build artifacts + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --release --all-features --workspace --verbose --target ${{ 
matrix.target }} + - name: Rename and collect artifacts + id: artifacts + run: echo "::set-output name=artifact::$(sh ./target_rename.sh "${{ matrix.target }}" | head -n 1)" + - name: Upload build artifacts + uses: softprops/action-gh-release@v1 + with: + tag_name: bin-${{ github.ref_name }} + files: | + ${{ steps.artifacts.outputs.artifact }} diff --git a/apex_dma/memflow_lib/memflow-win32/.github/workflows/build.yml b/apex_dma/memflow_lib/memflow-win32/.github/workflows/build.yml new file mode 100644 index 0000000..de9d923 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/.github/workflows/build.yml @@ -0,0 +1,115 @@ +name: Build and test + +on: [push, pull_request] + +env: + CARGO_TERM_COLOR: always + +jobs: + + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + flags: [--all-features, --no-default-features] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + + - name: Build + run: cargo build --workspace ${{ matrix.flags }} --verbose + + - name: Build examples + run: cargo build --workspace ${{ matrix.flags }} --examples --verbose + + build-cross-targets: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: [aarch64-unknown-linux-gnu, armv7-unknown-linux-gnueabihf, i686-unknown-linux-gnu] + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + target: ${{ matrix.target }} + override: true + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --target ${{ matrix.target }} --workspace --all-features --verbose + + test: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + steps: + - uses: actions/checkout@v2 + - name: Install rust 1.70.0 + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + + - name: Pre-build binaries (for inventory 
integration tests) + run: cargo build --workspace --all-features --verbose + + - name: Run all tests + run: cargo test --workspace --all-features --verbose + if: runner.os == 'Linux' + + - name: Run all tests + run: cargo test --workspace --exclude memflow-derive --all-features --verbose + if: runner.os != 'Linux' + + test-cross: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: [aarch64-unknown-linux-gnu, i686-unknown-linux-gnu] + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly # currently required due to -Zdoctest-xcompile + target: ${{ matrix.target }} + override: true + - name: Pre-build binaries (for inventory integration tests) + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --target ${{ matrix.target }} --workspace --all-features --verbose --release + - name: Run all tests + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: -Zdoctest-xcompile --target ${{ matrix.target }} --workspace --all-features --verbose --release + + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: 1.70.0 + override: true + components: rustfmt, clippy + - run: rustup component add clippy + - name: Check formatting + run: cargo fmt -- --check + - uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: --all-targets --all-features --workspace -- -D clippy::all + diff --git a/apex_dma/memflow_lib/memflow-win32/.gitignore b/apex_dma/memflow_lib/memflow-win32/.gitignore new file mode 100644 index 0000000..a420e20 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/.gitignore @@ -0,0 +1,10 @@ +/target +**/*.rs.bk +*.swp +*.so +*.dll +*.dylib +.vscode +*.so +.vagrant +TODO.md diff --git a/apex_dma/memflow_lib/memflow-win32/Cargo.toml b/apex_dma/memflow_lib/memflow-win32/Cargo.toml index 399c3a3..2ec0b56 100644 --- 
a/apex_dma/memflow_lib/memflow-win32/Cargo.toml +++ b/apex_dma/memflow_lib/memflow-win32/Cargo.toml @@ -1,80 +1,18 @@ -[package] -name = "memflow-win32" -version = "0.1.5" -authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] -edition = "2018" -description = "win32 integration of the memflow physical memory introspection framework" -documentation = "https://docs.rs/memflow-win32" -readme = "README.md" -homepage = "https://memflow.github.io" -repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" -keywords = [ "memflow", "introspection", "memory", "dma" ] -categories = [ "api-bindings", "memory-management", "os" ] +[profile.bench] +debug = true -[badges] -maintenance = { status = "actively-developed" } -codecov = { repository = "github", branch = "master", service = "github" } +[workspace] +resolver = "1" -[dependencies] -memflow = { version = "0.1", path = "../memflow", default-features = false } -log = { version = "0.4", default-features = false } -dataview = "0.1" -pelite = { version = "0.9", default-features = false } -widestring = { version = "0.4", default-features = false, features = ["alloc"] } -no-std-compat = { version = "0.4", features = ["alloc"] } -serde = { version = "1.0", default-features = false, optional = true, features = ["derive"] } +members = [ + "memflow-win32", + "memflow-win32-defs", +] -# will be replaced by our own signature scanner -regex = { version = "1", optional = true } +default-members = [ + "memflow-win32", + "memflow-win32-defs", +] -# symbolstore -dirs = { version = "2.0", optional = true } -ureq = { version = "1.2", optional = true } -pdb = { version = "0.6", optional = true } -pbr = { version = "1.0", optional = true } -progress-streams = { version = "1.1", optional = true } - -[dev_dependencies] -simple_logger = "1.0" -win_key_codes = "0.1" -rand = "0.7" -rand_xorshift = "0.2" -clap = "2.33" -toml = "0.5" -colored = "2.0" - -[build_dependencies] -toml = "0.5" -dataview = "0.1" -serde = { version = 
"1.0", default-features = false, features = ["derive", "alloc"] } - -[features] -default = ["std", "serde_derive", "embed_offsets", "symstore", "download_progress", "regex"] -std = ["no-std-compat/std", "memflow/std"] -embed_offsets = ["serde", "memflow/serde_derive"] -collections = [] -alloc = [] -serde_derive = ["serde", "memflow/serde_derive", "pelite/std", "pelite/serde"] -symstore = ["dirs", "ureq", "pdb"] -download_progress = ["pbr", "progress-streams"] - -[[example]] -name = "dump_offsets" -path = "examples/dump_offsets.rs" - -[[example]] -name = "generate_offsets" -path = "examples/generate_offsets.rs" - -[[example]] -name = "read_keys" -path = "examples/read_keys.rs" - -[[example]] -name = "multithreading" -path = "examples/multithreading.rs" - -[[example]] -name = "read_bench" -path = "examples/read_bench.rs" +# [patch.crates-io] +# memflow = { path = "../memflow/memflow" } diff --git a/apex_dma/memflow_lib/memflow-qemu-procfs/LICENSE b/apex_dma/memflow_lib/memflow-win32/LICENSE similarity index 89% rename from apex_dma/memflow_lib/memflow-qemu-procfs/LICENSE rename to apex_dma/memflow_lib/memflow-win32/LICENSE index 7c11d2d..cf3ebd7 100644 --- a/apex_dma/memflow_lib/memflow-qemu-procfs/LICENSE +++ b/apex_dma/memflow_lib/memflow-win32/LICENSE @@ -1,7 +1,7 @@ MIT License -Copyright (c) 2020 ko1N -Copyright (c) 2020 Aurimas Blažulionis <0x60@pm.me> +Copyright (c) 2020-2022 ko1N +Copyright (c) 2020-2022 Aurimas Blažulionis <0x60@pm.me> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,5 +19,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - +SOFTWARE. 
\ No newline at end of file diff --git a/apex_dma/memflow_lib/memflow-win32/README.md b/apex_dma/memflow_lib/memflow-win32/README.md index ef7bf99..64b77c7 100644 --- a/apex_dma/memflow_lib/memflow-win32/README.md +++ b/apex_dma/memflow_lib/memflow-win32/README.md @@ -7,38 +7,4 @@ This crate provides integration for win32 targets for [memflow](https://github.com/memflow/memflow). This library can be used in addition to the memflow core itself read processes, modules, drivers, etc. -Example initializing a win32 target: -```rust -use std::fs::File; -use std::io::Write; - -use log::{error, Level}; - -use memflow::connector::*; -use memflow_win32::win32::{Kernel, Win32OffsetFile}; - -pub fn main() { - let connector_name = std::env::args().nth(1).unwrap(); - let connector_args = std::env::args().nth(2).unwrap_or_default(); - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::try_new() }.unwrap(); - let connector = unsafe { - inventory.create_connector( - &connector_name, - &ConnectorArgs::parse(&connector_args).unwrap(), - ) - } - .unwrap(); - - // initialize kernel - let kernel = Kernel::builder(connector) - .build_default_caches() - .build() - .unwrap(); - - println!("{:?}", kernel); -} -``` - -Additional examples can be found in the `examples` subdirectory. +Examples can be found in the `memflow-win32/examples` subdirectory. 
diff --git a/apex_dma/memflow_lib/memflow-win32/build.rs b/apex_dma/memflow_lib/memflow-win32/build.rs deleted file mode 100644 index 580ee8a..0000000 --- a/apex_dma/memflow_lib/memflow-win32/build.rs +++ /dev/null @@ -1,50 +0,0 @@ -use dataview::Pod; -use std::{ - env, - error::Error, - fs::{self, File}, - io::{Read, Write}, - path::Path, -}; - -#[path = "src/offsets/offset_table.rs"] -#[cfg(feature = "embed_offsets")] -mod offset_table; - -#[cfg(feature = "embed_offsets")] -use offset_table::Win32OffsetFile; - -#[cfg(feature = "embed_offsets")] -fn embed_offsets() -> Result<(), Box> { - let out_dir = env::var("OUT_DIR")?; - let dest_path = Path::new(&out_dir).join("win32_offsets.bin"); - let mut all_the_files = File::create(&dest_path)?; - - // iterate offsets folder - for f in fs::read_dir("./offsets")? { - let f = f?; - - if !f.file_type()?.is_file() { - continue; - } - - let mut file = File::open(f.path())?; - let mut tomlstr = String::new(); - file.read_to_string(&mut tomlstr)?; - - let offsets: Win32OffsetFile = toml::from_str(&tomlstr)?; - all_the_files.write_all(offsets.as_bytes())?; - } - - Ok(()) -} - -#[cfg(not(feature = "embed_offsets"))] -fn embed_offsets() -> Result<(), Box> { - Ok(()) -} - -fn main() -> Result<(), Box> { - embed_offsets()?; - Ok(()) -} diff --git a/apex_dma/memflow_lib/memflow-win32/examples/dump_offsets.rs b/apex_dma/memflow_lib/memflow-win32/examples/dump_offsets.rs deleted file mode 100644 index 8409ab5..0000000 --- a/apex_dma/memflow_lib/memflow-win32/examples/dump_offsets.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::fs::File; -use std::io::Write; - -use clap::*; -use log::{error, Level}; - -use memflow::connector::*; - -use memflow_win32::prelude::{Kernel, Win32OffsetFile}; - -pub fn main() { - let matches = App::new("dump offsets example") - .version(crate_version!()) - .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) - .arg( - Arg::with_name("connector") - .long("connector") - .short("c") 
- .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("args") - .long("args") - .short("a") - .takes_value(true) - .default_value(""), - ) - .arg( - Arg::with_name("output") - .long("output") - .short("o") - .takes_value(true), - ) - .get_matches(); - - // set log level - let level = match matches.occurrences_of("verbose") { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::scan() }; - let connector = unsafe { - inventory.create_connector( - matches.value_of("connector").unwrap(), - &ConnectorArgs::parse(matches.value_of("args").unwrap()).unwrap(), - ) - } - .unwrap(); - - let kernel = Kernel::builder(connector) - .build_default_caches() - .build() - .unwrap(); - - let winver = kernel.kernel_info.kernel_winver; - - if winver != (0, 0).into() { - let offsets = if let Some(guid) = &kernel.kernel_info.kernel_guid { - Win32OffsetFile { - pdb_file_name: guid.file_name.as_str().into(), - pdb_guid: guid.guid.as_str().into(), - - arch: kernel.kernel_info.start_block.arch.into(), - - nt_major_version: winver.major_version(), - nt_minor_version: winver.minor_version(), - nt_build_number: winver.build_number(), - - offsets: kernel.offsets.into(), - } - } else { - Win32OffsetFile { - pdb_file_name: Default::default(), - pdb_guid: Default::default(), - - arch: kernel.kernel_info.start_block.arch.into(), - - nt_major_version: winver.major_version(), - nt_minor_version: winver.minor_version(), - nt_build_number: winver.build_number(), - - offsets: kernel.offsets.into(), - } - }; - - // write offsets to file - let offsetstr = toml::to_string_pretty(&offsets).unwrap(); - match matches.value_of("output") { - Some(output) => { - let mut file = File::create(output).unwrap(); - 
file.write_all(offsetstr.as_bytes()).unwrap(); - } - None => println!("{}", offsetstr), - } - } else { - error!("kernel version has to be valid in order to generate a offsets file"); - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/examples/integration.rs b/apex_dma/memflow_lib/memflow-win32/examples/integration.rs deleted file mode 100644 index eb3099c..0000000 --- a/apex_dma/memflow_lib/memflow-win32/examples/integration.rs +++ /dev/null @@ -1,211 +0,0 @@ -use memflow::connector::ConnectorInventory; -use memflow::connector::ConnectorArgs; -use memflow::mem::*; - -use memflow_win32::error::{Error, Result}; -use memflow_win32::win32::{Kernel, Win32ModuleInfo, Win32Process}; - -use clap::*; -use log::Level; - -use colored::*; - -static mut HAD_ERROR: bool = false; - -fn main() -> Result<()> { - let (connector, args_str) = parse_args(); - - let args = ConnectorArgs::parse(&args_str)?; - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::scan() }; - let connector = unsafe { inventory.create_connector(&connector, &args)? }; - - let mut kernel = build_kernel(connector)?; - - { - println!("Kernel info:"); - let info = &kernel.kernel_info; - let start_block = &info.start_block; - println!( - "{:#?} ... {}", - start_block, - some_str(&start_block.dtb.non_null()) - ); - println!( - "kernel_base: {:x} ... {}", - info.kernel_base, - some_str(&info.kernel_base.non_null()) - ); - println!( - "kernel_size: {:x} ... {}", - info.kernel_size, - bool_str(info.kernel_size != 0) - ); - println!( - "kernel_guid: {:?} ... {}", - info.kernel_guid, - some_str(&info.kernel_guid) - ); - println!( - "kernel_winver: {:?} ... {}", - info.kernel_winver.as_tuple(), - bool_str(info.kernel_winver != (0, 0).into()) - ); - println!( - "eprocess_base: {:x} ... 
{}", - info.eprocess_base, - some_str(&info.eprocess_base.non_null()) - ); - println!(); - } - - { - println!("Kernel Process:"); - if let Ok(proc_info) = kernel.kernel_process_info() { - let mut kernel_proc = Win32Process::with_kernel_ref(&mut kernel, proc_info); - let modules = modules(&mut kernel_proc)?; - println!("checking module list:"); - println!( - "ntoskrnl.exe ... {}", - some_str( - &modules - .iter() - .find(|e| e.name.to_lowercase() == "ntoskrnl.exe") - ) - ); - println!( - "hal.dll ... {}", - some_str(&modules.iter().find(|e| e.name.to_lowercase() == "hal.dll")) - ); - } else { - println!("{}", bool_str(false)); - } - println!(); - } - - { - println!("Process List:"); - let proc_list = kernel.process_info_list()?; - let lsass = proc_list - .iter() - .find(|p| p.name.to_lowercase() == "lsass.exe"); - println!("lsass.exe ... {}", some_str(&lsass)); - println!(); - - if let Some(proc) = lsass { - println!("{} info:", proc.name); - println!("pid: {} ... {}", proc.pid, bool_str(proc.pid < 10000)); - println!("dtb: {} ... {}", proc.dtb, some_str(&proc.dtb.non_null())); - println!( - "section_base: {} ... {}", - proc.section_base, - some_str(&proc.section_base.non_null()) - ); - println!( - "ethread: {} ... {}", - proc.ethread, - some_str(&proc.ethread.non_null()) - ); - println!("teb: {:?} ... {}", proc.teb, bool_str(proc.teb.is_none())); - println!( - "teb_wow64: {:?} ... {}", - proc.teb_wow64, - bool_str(proc.teb_wow64.is_none()) - ); - println!( - "peb_native: {} ... {}", - proc.peb_native, - some_str(&proc.peb_native.non_null()) - ); - println!( - "peb_wow64: {:?} ... 
{}", - proc.teb_wow64, - bool_str(proc.peb_wow64.is_none()) - ); - } - } - - unsafe { - if HAD_ERROR { - Err(Error::Other( - "Some errors encountered, not all functionality may be present!", - )) - } else { - Ok(()) - } - } -} - -fn some_str(r: &Option) -> ColoredString { - bool_str(r.is_some()) -} - -fn ok_str(r: &Result) -> ColoredString { - bool_str(r.is_ok()) -} - -fn bool_str(b: bool) -> ColoredString { - if b { - "ok".green() - } else { - unsafe { HAD_ERROR = true }; - "error".red() - } -} - -fn modules(process: &mut Win32Process) -> Result> { - let modules = process.module_list(); - println!("modules ... {}", ok_str(&modules)); - modules -} - -fn build_kernel( - mem: T, -) -> Result> { - let kernel = Kernel::builder(mem).build_default_caches().build(); - println!("Kernel::build ... {}", ok_str(&kernel)); - println!(); - kernel -} - -fn parse_args() -> (String, String) { - let matches = App::new("read_keys example") - .version(crate_version!()) - .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) - .arg( - Arg::with_name("connector") - .long("connector") - .short("c") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("args") - .long("args") - .short("a") - .takes_value(true) - .default_value(""), - ) - .get_matches(); - - // set log level - let level = match matches.occurrences_of("verbose") { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); - - ( - matches.value_of("connector").unwrap().into(), - matches.value_of("args").unwrap().into(), - ) -} diff --git a/apex_dma/memflow_lib/memflow-win32/examples/multithreading.rs b/apex_dma/memflow_lib/memflow-win32/examples/multithreading.rs deleted file mode 100644 index afce665..0000000 --- a/apex_dma/memflow_lib/memflow-win32/examples/multithreading.rs +++ /dev/null @@ -1,136 +0,0 @@ 
-use std::thread; - -use clap::*; -use log::{info, Level}; - -use memflow::connector::*; -use memflow::mem::*; - -use memflow_win32::win32::Kernel; - -pub fn parallel_init(connector: T) { - (0..8) - .map(|_| connector.clone()) - .into_iter() - .map(|c| { - thread::spawn(move || { - Kernel::builder(c) - .no_symbol_store() - .build_default_caches() - .build() - .unwrap(); - }) - }) - .for_each(|t| t.join().unwrap()); -} - -pub fn parallel_kernels(connector: T) { - let kernel = Kernel::builder(connector).build().unwrap(); - - (0..8) - .map(|_| kernel.clone()) - .into_iter() - .map(|mut k| { - thread::spawn(move || { - let _eprocesses = k.eprocess_list().unwrap(); - }) - }) - .for_each(|t| t.join().unwrap()); -} - -pub fn parallel_kernels_cached(connector: T) { - let kernel = Kernel::builder(connector) - .build_default_caches() - .build() - .unwrap(); - - (0..8) - .map(|_| kernel.clone()) - .into_iter() - .map(|mut k| { - thread::spawn(move || { - let eprocesses = k.eprocess_list().unwrap(); - info!("eprocesses list fetched: {}", eprocesses.len()); - }) - }) - .for_each(|t| t.join().unwrap()); -} - -pub fn parallel_processes(connector: T) { - let kernel = Kernel::builder(connector) - .build_default_caches() - .build() - .unwrap(); - - let process = kernel.into_process("wininit.exe").unwrap(); - - (0..8) - .map(|_| process.clone()) - .into_iter() - .map(|mut p| { - thread::spawn(move || { - let module_list = p.module_list().unwrap(); - info!("wininit.exe module_list: {}", module_list.len()); - }) - }) - .for_each(|t| t.join().unwrap()); -} - -pub fn main() { - let matches = App::new("read_keys example") - .version(crate_version!()) - .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) - .arg( - Arg::with_name("connector") - .long("connector") - .short("c") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("args") - .long("args") - .short("a") - .takes_value(true) - .default_value(""), - ) - .get_matches(); - - // set 
log level - let level = match matches.occurrences_of("verbose") { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::scan() }; - let connector = unsafe { - inventory.create_connector( - matches.value_of("connector").unwrap(), - &ConnectorArgs::parse(matches.value_of("args").unwrap()).unwrap(), - ) - } - .unwrap(); - - println!("test"); - - // parallel test functions - // see each function's implementation for further details - - parallel_init(connector.clone()); - - parallel_kernels(connector.clone()); - - parallel_kernels_cached(connector.clone()); - - parallel_processes(connector); -} diff --git a/apex_dma/memflow_lib/memflow-win32/examples/read_bench.rs b/apex_dma/memflow_lib/memflow-win32/examples/read_bench.rs deleted file mode 100644 index 3260c67..0000000 --- a/apex_dma/memflow_lib/memflow-win32/examples/read_bench.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std::io::Write; -use std::time::{Duration, Instant}; - -use clap::*; -use log::Level; - -use memflow::connector::*; -use memflow::mem::*; -use memflow::process::*; -use memflow::types::*; - -use memflow_win32::error::Result; -use memflow_win32::offsets::Win32Offsets; -use memflow_win32::win32::{Kernel, KernelInfo, Win32ModuleInfo, Win32Process}; - -use rand::{Rng, SeedableRng}; -use rand_xorshift::XorShiftRng as CurRng; - -fn rwtest( - proc: &mut Win32Process, - module: &dyn OsProcessModuleInfo, - chunk_sizes: &[usize], - chunk_counts: &[usize], - read_size: usize, -) { - let mut rng = CurRng::seed_from_u64(0); - - println!("Performance bench:"); - print!("{:#7}", "SIZE"); - - for i in chunk_counts { - print!(", x{:02x} mb/s, x{:02x} calls/s", *i, *i); - } - - println!(); - - let start = Instant::now(); - let mut ttdur = Duration::new(0, 0); - - for i 
in chunk_sizes { - print!("0x{:05x}", *i); - for o in chunk_counts { - let mut done_size = 0_usize; - let mut total_dur = Duration::new(0, 0); - let mut calls = 0; - let mut bufs = vec![(vec![0 as u8; *i], 0); *o]; - - let base_addr = rng.gen_range( - module.base().as_u64(), - module.base().as_u64() + module.size() as u64, - ); - - while done_size < read_size { - for (_, addr) in bufs.iter_mut() { - *addr = base_addr + rng.gen_range(0, 0x2000); - } - - let now = Instant::now(); - { - let mut batcher = proc.virt_mem.virt_batcher(); - - for (buf, addr) in bufs.iter_mut() { - batcher.read_raw_into(Address::from(*addr), buf); - } - } - total_dur += now.elapsed(); - done_size += *i * *o; - calls += 1; - } - - ttdur += total_dur; - let total_time = total_dur.as_secs_f64(); - - print!( - ", {:8.2}, {:11.2}", - (done_size / 0x0010_0000) as f64 / total_time, - calls as f64 / total_time - ); - std::io::stdout().flush().expect(""); - } - println!(); - } - - let total_dur = start.elapsed(); - println!( - "Total bench time: {:.2} {:.2}", - total_dur.as_secs_f64(), - ttdur.as_secs_f64() - ); -} - -fn read_bench( - phys_mem: &mut T, - vat: &mut V, - kernel_info: KernelInfo, -) -> Result<()> { - let offsets = Win32Offsets::builder().kernel_info(&kernel_info).build()?; - let mut kernel = Kernel::new(phys_mem, vat, offsets, kernel_info); - - let proc_list = kernel.process_info_list()?; - let mut rng = CurRng::seed_from_u64(rand::thread_rng().gen_range(0, !0u64)); - loop { - let mut prc = Win32Process::with_kernel_ref( - &mut kernel, - proc_list[rng.gen_range(0, proc_list.len())].clone(), - ); - - let mod_list: Vec = prc - .module_list()? 
- .into_iter() - .filter(|module| module.size() > 0x1000) - .collect(); - - if !mod_list.is_empty() { - let tmod = &mod_list[rng.gen_range(0, mod_list.len())]; - println!( - "Found test module {} ({:x}) in {}", - tmod.name(), - tmod.size(), - prc.proc_info.name(), - ); - - let mem_map = prc.virt_mem.virt_page_map(size::gb(1)); - - println!("Memory map (with up to 1GB gaps):"); - - for (addr, len) in mem_map { - println!("{:x}-{:x}", addr, addr + len); - } - - rwtest( - &mut prc, - tmod, - &[0x10000, 0x1000, 0x100, 0x10, 0x8], - &[32, 8, 1], - 0x0010_0000 * 32, - ); - - break; - } - } - - Ok(()) -} - -fn main() -> Result<()> { - let matches = App::new("read_keys example") - .version(crate_version!()) - .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) - .arg( - Arg::with_name("connector") - .long("connector") - .short("c") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("args") - .long("args") - .short("a") - .takes_value(true) - .default_value(""), - ) - .get_matches(); - - // set log level - let level = match matches.occurrences_of("verbose") { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::scan() }; - let mut connector = unsafe { - inventory.create_connector( - matches.value_of("connector").unwrap(), - &ConnectorArgs::parse(matches.value_of("args").unwrap()).unwrap(), - ) - } - .unwrap(); - - // scan for win32 kernel - let kernel_info = KernelInfo::scanner(&mut connector).scan()?; - - let mut vat = DirectTranslate::new(); - - println!("Benchmarking uncached reads:"); - read_bench(&mut connector, &mut vat, kernel_info.clone()).unwrap(); - - println!(); - println!("Benchmarking cached reads:"); - let mut mem_cached = CachedMemoryAccess::builder(&mut 
connector) - .arch(kernel_info.start_block.arch) - .build() - .unwrap(); - - let mut vat_cached = CachedVirtualTranslate::builder(vat) - .arch(kernel_info.start_block.arch) - .build() - .unwrap(); - - read_bench(&mut mem_cached, &mut vat_cached, kernel_info).unwrap(); - - println!("TLB Hits {}\nTLB Miss {}", vat_cached.hitc, vat_cached.misc); - - Ok(()) -} diff --git a/apex_dma/memflow_lib/memflow-win32/examples/read_keys.rs b/apex_dma/memflow_lib/memflow-win32/examples/read_keys.rs deleted file mode 100644 index 1821088..0000000 --- a/apex_dma/memflow_lib/memflow-win32/examples/read_keys.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::{thread, time}; - -use clap::*; -use log::Level; - -use memflow::connector::*; - -use memflow_win32::win32::{Kernel, Keyboard}; - -pub fn main() { - let matches = App::new("read_keys example") - .version(crate_version!()) - .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) - .arg( - Arg::with_name("connector") - .long("connector") - .short("c") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("args") - .long("args") - .short("a") - .takes_value(true) - .default_value(""), - ) - .get_matches(); - - // set log level - let level = match matches.occurrences_of("verbose") { - 0 => Level::Error, - 1 => Level::Warn, - 2 => Level::Info, - 3 => Level::Debug, - 4 => Level::Trace, - _ => Level::Trace, - }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); - - // create inventory + connector - let inventory = unsafe { ConnectorInventory::scan() }; - let connector = unsafe { - inventory.create_connector( - matches.value_of("connector").unwrap(), - &ConnectorArgs::parse(matches.value_of("args").unwrap()).unwrap(), - ) - } - .unwrap(); - - // creating the kernel object - let mut kernel = Kernel::builder(connector) - .build_default_caches() - .build() - .unwrap(); - - // fetch keyboard state - let kbd = Keyboard::try_with(&mut kernel).unwrap(); - - 
loop { - let kbs = kbd.state_with_kernel(&mut kernel).unwrap(); - println!("space down: {:?}", kbs.is_down(win_key_codes::VK_SPACE)); - thread::sleep(time::Duration::from_millis(1000)); - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/install.sh b/apex_dma/memflow_lib/memflow-win32/install.sh new file mode 100644 index 0000000..2f7e506 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/install.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cargo build --release --all-features + +# install connector to system dir +if [ ! -z "$1" ] && [ $1 = "--system" ]; then + echo "installing connector system-wide in /usr/local/lib/memflow" + if [[ ! -d /usr/local/lib/memflow ]]; then + sudo mkdir /usr/local/lib/memflow + fi + sudo cp target/release/libmemflow_win32.so /usr/local/lib/memflow/libmemflow_win32.7.so +fi + +# install connector in user dir +echo "installing connector for user in ~/.local/lib/memflow" +if [[ ! -d ~/.local/lib/memflow ]]; then + mkdir -p ~/.local/lib/memflow +fi +cp target/release/libmemflow_win32.so ~/.local/lib/memflow/libmemflow_win32.7.so diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/Cargo.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/Cargo.toml new file mode 100644 index 0000000..0054316 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "memflow-win32-defs" +version = "0.2.0" +authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] +edition = "2018" +description = "static offset templates for " +documentation = "https://docs.rs/memflow-win32-defs" +readme = "../README.md" +homepage = "https://memflow.io" +repository = "https://github.com/memflow/memflow-win32" +license = "MIT" +keywords = [ "memflow", "introspection", "memory", "dma" ] +categories = [ "api-bindings", "memory-management", "os" ] + +[dependencies] +memflow = { version = "0.2", default-features = false } +log = { version = "^0.4.14", default-features = false } +no-std-compat = { 
version = "^0.4.1", features = ["alloc"] } +serde = { version = "^1.0.133", default-features = false, optional = true, features = ["derive"] } + +# symbolstore +dirs = { version = "^5.0.0", optional = true } +ureq = { version = "^2.4.0", optional = true } +pdb = { version = "^0.8.0", optional = true } +indicatif = { version = "^0.17.2", optional = true } +progress-streams = { version = "^1.1.0", optional = true } + +[dev_dependencies] +simplelog = "^0.12.0" +clap = { version = "^4.0.26", features = ["cargo"] } +toml = "^0.8.8" + +[features] +default = ["symstore", "download_progress"] +std = ["no-std-compat/std"] +symstore = ["dirs", "ureq", "pdb", "std"] +download_progress = ["indicatif", "progress-streams"] + +[[example]] +name = "generate_offsets" +path = "examples/generate_offsets.rs" +required-features = ["symstore", "serde"] diff --git a/apex_dma/memflow_lib/memflow-win32/examples/generate_offsets.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/examples/generate_offsets.rs similarity index 53% rename from apex_dma/memflow_lib/memflow-win32/examples/generate_offsets.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/examples/generate_offsets.rs index 42ffbb4..809fcf7 100644 --- a/apex_dma/memflow_lib/memflow-win32/examples/generate_offsets.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/examples/generate_offsets.rs @@ -4,26 +4,22 @@ use std::fs::{create_dir_all, File}; use std::io::Write; use std::path::PathBuf; -use memflow_win32::prelude::{ - SymbolStore, Win32GUID, Win32OffsetFile, Win32Offsets, Win32OffsetsArchitecture, Win32Version, -}; +use memflow_win32_defs::{kernel::*, offsets::*}; pub fn main() { - let matches = App::new("generate offsets example") + let matches = Command::new("generate offsets example") .version(crate_version!()) .author(crate_authors!()) - .arg(Arg::with_name("verbose").short("v").multiple(true)) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) .arg( - 
Arg::with_name("output") - .long("output") - .short("o") - .takes_value(true) + Arg::new("output") + .short('o') + .action(ArgAction::Set) .required(true), ) .get_matches(); - // set log level - let level = match matches.occurrences_of("verbose") { + let log_level = match matches.get_count("verbose") { 0 => Level::Error, 1 => Level::Warn, 2 => Level::Info, @@ -31,51 +27,69 @@ pub fn main() { 4 => Level::Trace, _ => Level::Trace, }; - simple_logger::SimpleLogger::new() - .with_level(level.to_level_filter()) - .init() - .unwrap(); + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); let win_ids = vec![ /* ( Win32Version::new(5, 2, 3790), - Win32GUID::new("ntkrnlmp.pdb", "82DCF67A38274C9CA99B60B421D2786D2"), + Win32Guid::new("ntkrnlmp.pdb", "82DCF67A38274C9CA99B60B421D2786D2"), ), */ ( Win32Version::new(6, 1, 7601), Win32OffsetsArchitecture::X86, - Win32GUID::new("ntkrpamp.pdb", "684DA42A30CC450F81C535B4D18944B12"), + Win32Guid::new("ntkrpamp.pdb", "684DA42A30CC450F81C535B4D18944B12"), ), ( Win32Version::new(6, 1, 7601), Win32OffsetsArchitecture::X64, - Win32GUID::new("ntkrnlmp.pdb", "ECE191A20CFF4465AE46DF96C22638451"), + Win32Guid::new("ntkrnlmp.pdb", "ECE191A20CFF4465AE46DF96C22638451"), ), ( Win32Version::new(10, 0, 18362), Win32OffsetsArchitecture::X64, - Win32GUID::new("ntkrnlmp.pdb", "0AFB69F5FD264D54673570E37B38A3181"), + Win32Guid::new("ntkrnlmp.pdb", "0AFB69F5FD264D54673570E37B38A3181"), ), ( Win32Version::new(10, 0, 19041), Win32OffsetsArchitecture::X64, - Win32GUID::new("ntkrnlmp.pdb", "BBED7C2955FBE4522AAA23F4B8677AD91"), + Win32Guid::new("ntkrnlmp.pdb", "BBED7C2955FBE4522AAA23F4B8677AD91"), ), ( Win32Version::new(10, 0, 19041), Win32OffsetsArchitecture::X64, - Win32GUID::new("ntkrnlmp.pdb", "1C9875F76C8F0FBF3EB9A9D7C1C274061"), + Win32Guid::new("ntkrnlmp.pdb", "1C9875F76C8F0FBF3EB9A9D7C1C274061"), ), ( Win32Version::new(10, 0, 
19041), + Win32OffsetsArchitecture::X64, + Win32Guid::new("ntkrnlmp.pdb", "9C00B19DBDE003DBFE4AB4216993C8431"), + ), + ( + Win32Version::new(10, 0, 19045), + Win32OffsetsArchitecture::X64, + Win32Guid::new("ntkrnlmp.pdb", "5F0CF5D532F385333A9B4ABA25CA65961"), + ), + ( + Win32Version::new(10, 0, 19041), + Win32OffsetsArchitecture::X86, + Win32Guid::new("ntkrpamp.pdb", "1B1D6AA205E1C87DC63A314ACAA50B491"), + ), + ( + Win32Version::new(10, 0, 4026553840), Win32OffsetsArchitecture::X86, - Win32GUID::new("ntkrpamp.pdb", "1B1D6AA205E1C87DC63A314ACAA50B491"), + Win32Guid::new("ntkrnlmp.pdb", "55678BC384F099B6ED05E9E39046924A1"), ), ]; - let out_dir = matches.value_of("output").unwrap(); + let out_dir = matches.get_one::("output").unwrap(); create_dir_all(out_dir).unwrap(); for win_id in win_ids.into_iter() { @@ -85,14 +99,16 @@ pub fn main() { .build() { let offset_file = Win32OffsetFile { - pdb_file_name: win_id.2.file_name.as_str().into(), - pdb_guid: win_id.2.guid.as_str().into(), + header: Win32OffsetHeader { + pdb_file_name: win_id.2.file_name.as_str().into(), + pdb_guid: win_id.2.guid.as_str().into(), - nt_major_version: win_id.0.major_version(), - nt_minor_version: win_id.0.minor_version(), - nt_build_number: win_id.0.build_number(), + nt_major_version: win_id.0.major_version(), + nt_minor_version: win_id.0.minor_version(), + nt_build_number: win_id.0.build_number(), - arch: win_id.1, + arch: win_id.1, + }, offsets: offsets.0, }; @@ -104,7 +120,7 @@ pub fn main() { win_id.0.major_version(), win_id.0.minor_version(), win_id.0.build_number(), - win_id.1.to_string(), + win_id.1, win_id.2.guid, ); diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/mod.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/kernel.rs similarity index 95% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/mod.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/kernel.rs index 30f37f0..e9a21f2 100644 --- 
a/apex_dma/memflow_lib/memflow-win32/src/kernel/mod.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/kernel.rs @@ -1,22 +1,14 @@ -pub mod ntos; -pub mod start_block; -pub mod sysproc; - -use std::prelude::v1::*; - -pub use start_block::StartBlock; - use std::cmp::{Ord, Ordering, PartialEq}; use std::fmt; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] #[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Win32GUID { +pub struct Win32Guid { pub file_name: String, pub guid: String, } -impl Win32GUID { +impl Win32Guid { pub fn new(file_name: &str, guid: &str) -> Self { Self { file_name: file_name.to_string(), diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/lib.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/lib.rs new file mode 100644 index 0000000..04878b0 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/lib.rs @@ -0,0 +1,5 @@ +#![cfg_attr(not(feature = "std"), no_std)] +extern crate no_std_compat as std; + +pub mod kernel; +pub mod offsets; diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/builder.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/builder.rs similarity index 51% rename from apex_dma/memflow_lib/memflow-win32/src/offsets/builder.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/builder.rs index 853243f..429c49b 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/offsets/builder.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/builder.rs @@ -6,31 +6,22 @@ use super::symstore::SymbolStore; use super::offset_table::Win32OffsetFile; use super::{Win32Offsets, Win32OffsetsArchitecture}; -use crate::error::{Error, Result}; -use crate::kernel::{Win32GUID, Win32Version}; -use crate::win32::KernelInfo; - -#[repr(align(16))] -struct Align16(pub T); - -#[cfg(feature = "embed_offsets")] -const WIN32_OFFSETS: Align16< - [u8; include_bytes!(concat!(env!("OUT_DIR"), 
"/win32_offsets.bin")).len()], -> = Align16(*include_bytes!(concat!( - env!("OUT_DIR"), - "/win32_offsets.bin" -))); - -pub struct Win32OffsetBuilder { +use crate::kernel::{Win32Guid, Win32Version}; + +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; + +pub struct Win32OffsetBuilder<'a> { #[cfg(feature = "symstore")] symbol_store: Option, - guid: Option, + guid: Option, winver: Option, arch: Option, + + offset_list: Option<&'a [Win32OffsetFile]>, } -impl Default for Win32OffsetBuilder { +impl<'a> Default for Win32OffsetBuilder<'a> { fn default() -> Self { Self { #[cfg(feature = "symstore")] @@ -39,20 +30,21 @@ impl Default for Win32OffsetBuilder { guid: None, winver: None, arch: None, + + offset_list: None, } } } -impl Win32OffsetBuilder { +impl<'a> Win32OffsetBuilder<'a> { pub fn new() -> Self { Self::default() } pub fn build(self) -> Result { if self.guid.is_none() && self.winver.is_none() { - return Err(Error::Other( - "building win32 offsets requires either a guid or winver", - )); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("building win32 offsets requires either a guid or winver")); } // try to build via symbol store @@ -65,28 +57,25 @@ impl Win32OffsetBuilder { return Ok(offs); } - Err(Error::Other("not found")) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("no valid offset configuration found while building win32")) } - #[cfg(feature = "embed_offsets")] fn build_with_offset_list(&self) -> Result { - // # Safety - // Struct padding and alignment is compile-time guaranteed by the struct (see mod offset_table). 
- let offsets: [Win32OffsetFile; - WIN32_OFFSETS.0.len() / std::mem::size_of::()] = - unsafe { std::mem::transmute(WIN32_OFFSETS.0) }; + let offsets = self.offset_list.ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("no offset list supplied") + })?; // Try matching exact guid if let Some(target_guid) = &self.guid { for offset in offsets.iter() { if let (Ok(file), Ok(guid)) = ( - <&str>::try_from(&offset.pdb_file_name), - <&str>::try_from(&offset.pdb_guid), + <&str>::try_from(&offset.header.pdb_file_name), + <&str>::try_from(&offset.header.pdb_guid), ) { if target_guid.file_name == file && target_guid.guid == guid { - return Ok(Win32Offsets { - 0: offset.offsets.clone(), - }); + return Ok(Win32Offsets(offset.offsets)); } } } @@ -98,16 +87,14 @@ impl Win32OffsetBuilder { // Try matching the newest build from that version that is not actually newer if let (Some(winver), Some(arch)) = (&self.winver, self.arch) { for offset in offsets.iter() { - if winver.major_version() == offset.nt_major_version - && winver.minor_version() == offset.nt_minor_version - && winver.build_number() >= offset.nt_build_number - && prev_build_number <= offset.nt_build_number - && arch == offset.arch + if winver.major_version() == offset.header.nt_major_version + && winver.minor_version() == offset.header.nt_minor_version + && winver.build_number() >= offset.header.nt_build_number + && prev_build_number <= offset.header.nt_build_number + && arch == offset.header.arch { - prev_build_number = offset.nt_build_number; - closest_match = Some(Win32Offsets { - 0: offset.offsets.clone(), - }); + prev_build_number = offset.header.nt_build_number; + closest_match = Some(Win32Offsets(offset.offsets)); } } @@ -120,35 +107,34 @@ impl Win32OffsetBuilder { } } - closest_match.ok_or(Error::Other("not found")) - } - - #[cfg(not(feature = "embed_offsets"))] - fn build_with_offset_list(&self) -> Result { - Err(Error::Other( - "embed offsets feature is deactivated on 
compilation", - )) + closest_match.ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("no valid offset configuration found while building win32") + }) } #[cfg(feature = "symstore")] fn build_with_symbol_store(&self) -> Result { if let Some(store) = &self.symbol_store { - if self.guid.is_some() { - let pdb = store.load(self.guid.as_ref().unwrap())?; + if let Some(guid) = &self.guid { + let pdb = store.load(guid)?; Win32Offsets::from_pdb_slice(&pdb[..]) } else { - Err(Error::Other("symbol store can only be used with a guid")) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("symbol store can only be used with a guid")) } } else { - Err(Error::Other("symbol store is disabled")) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("symbol store is disabled")) } } #[cfg(not(feature = "symstore"))] fn build_with_symbol_store(&self) -> Result { - Err(Error::Other( - "symbol store is deactivated via a compilation feature", - )) + Err( + Error(ErrorOrigin::OsLayer, ErrorKind::UnsupportedOptionalFeature) + .log_error("symbol store is deactivated via a compilation feature"), + ) } #[cfg(feature = "symstore")] @@ -163,31 +149,35 @@ impl Win32OffsetBuilder { self } - pub fn guid(mut self, guid: Win32GUID) -> Self { + pub fn offset_list(mut self, offset_list: &'a [Win32OffsetFile]) -> Self { + self.offset_list = Some(offset_list); + self + } + + pub fn guid(mut self, guid: Win32Guid) -> Self { self.guid = Some(guid); self } + pub fn get_guid(&self) -> &Option { + &self.guid + } + pub fn winver(mut self, winver: Win32Version) -> Self { self.winver = Some(winver); self } + pub fn get_winver(&self) -> &Option { + &self.winver + } + pub fn arch(mut self, arch: Win32OffsetsArchitecture) -> Self { self.arch = Some(arch); self } - pub fn kernel_info(mut self, kernel_info: &KernelInfo) -> Self { - if self.guid.is_none() { - self.guid = kernel_info.kernel_guid.clone(); - } - if self.winver.is_none() { - 
self.winver = Some(kernel_info.kernel_winver); - } - if self.arch.is_none() { - self.arch = Some(kernel_info.start_block.arch.into()); - } - self + pub fn get_arch(&self) -> &Option { + &self.arch } } diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/mod.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/mod.rs new file mode 100644 index 0000000..4238bdd --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/mod.rs @@ -0,0 +1,489 @@ +pub mod builder; +pub use builder::Win32OffsetBuilder; + +#[cfg(feature = "symstore")] +pub mod pdb; +#[cfg(feature = "symstore")] +pub mod symstore; + +pub mod offset_table; +#[doc(hidden)] +pub use offset_table::{ + MmVadOffsetTable, Win32OffsetFile, Win32OffsetHeader, Win32OffsetTable, + Win32OffsetsArchitecture, +}; + +#[cfg(feature = "symstore")] +pub use { + self::pdb::{PdbStruct, PdbSymbols}, + symstore::*, +}; + +use std::prelude::v1::*; + +use memflow::architecture::ArchitectureIdent; + +// those only required when compiling under std environment +#[cfg(feature = "std")] +use crate::kernel::Win32Guid; +#[cfg(feature = "std")] +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +#[cfg(feature = "std")] +use std::{fs::File, io::Read, path::Path}; + +#[derive(Debug, Copy, Clone)] +#[repr(C)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize))] +pub struct Win32ArchOffsets { + pub peb_ldr: usize, // _PEB::Ldr + pub peb_process_params: usize, // _PEB::ProcessParameters + pub ldr_list: usize, // _PEB_LDR_DATA::InLoadOrderModuleList + pub ldr_data_base: usize, // _LDR_DATA_TABLE_ENTRY::DllBase + pub ldr_data_size: usize, // _LDR_DATA_TABLE_ENTRY::SizeOfImage + pub ldr_data_full_name: usize, // _LDR_DATA_TABLE_ENTRY::FullDllName + pub ldr_data_base_name: usize, // _LDR_DATA_TABLE_ENTRY::BaseDllName + pub ppm_image_path_name: usize, // _RTL_USER_PROCESS_PARAMETERS::ImagePathName + pub ppm_command_line: usize, // 
_RTL_USER_PROCESS_PARAMETERS::CommandLine +} + +pub const X86: Win32ArchOffsets = Win32ArchOffsets { + peb_ldr: 0xc, + peb_process_params: 0x10, + ldr_list: 0xc, + ldr_data_base: 0x18, + ldr_data_size: 0x20, + ldr_data_full_name: 0x24, + ldr_data_base_name: 0x2c, + ppm_image_path_name: 0x38, + ppm_command_line: 0x40, +}; + +pub const X64: Win32ArchOffsets = Win32ArchOffsets { + peb_ldr: 0x18, + peb_process_params: 0x20, + ldr_list: 0x10, + ldr_data_base: 0x30, + ldr_data_size: 0x40, + ldr_data_full_name: 0x48, + ldr_data_base_name: 0x58, + ppm_image_path_name: 0x60, + ppm_command_line: 0x70, +}; + +pub const AARCH64: Win32ArchOffsets = Win32ArchOffsets { + peb_ldr: 0x18, + peb_process_params: 0x20, + ldr_list: 0x10, + ldr_data_base: 0x30, + ldr_data_size: 0x40, + ldr_data_full_name: 0x48, + ldr_data_base_name: 0x58, + ppm_image_path_name: 0x60, + ppm_command_line: 0x70, +}; + +impl Win32OffsetsArchitecture { + #[inline] + fn offsets(&self) -> &'static Win32ArchOffsets { + match self { + Win32OffsetsArchitecture::X64 => &X64, + Win32OffsetsArchitecture::X86 => &X86, + Win32OffsetsArchitecture::AArch64 => &AARCH64, + } + } +} + +impl From for Win32ArchOffsets { + fn from(arch: ArchitectureIdent) -> Win32ArchOffsets { + *Win32OffsetsArchitecture::from(arch).offsets() + } +} + +#[repr(transparent)] +#[derive(Debug, Clone)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize))] +pub struct Win32Offsets(pub Win32OffsetTable); + +impl From for Win32Offsets { + fn from(other: Win32OffsetTable) -> Self { + Self(other) + } +} + +impl From for Win32OffsetTable { + fn from(other: Win32Offsets) -> Self { + other.0 + } +} + +impl From for Win32OffsetsArchitecture { + fn from(arch: ArchitectureIdent) -> Win32OffsetsArchitecture { + match arch { + ArchitectureIdent::X86(32, _) => Self::X86, + ArchitectureIdent::X86(64, _) => Self::X64, + ArchitectureIdent::AArch64(_) => Self::AArch64, + _ => panic!("Invalid architecture specified"), + } + } +} + +impl Win32Offsets { + 
#[cfg(feature = "symstore")] + pub fn from_pdb>(pdb_path: P) -> Result { + let mut file = File::open(pdb_path).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("unable to open user-supplied pdb file") + })?; + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("unable to read user-supplied pdb file") + })?; + Self::from_pdb_slice(&buffer[..]) + } + + #[cfg(feature = "symstore")] + pub fn from_pdb_slice(pdb_slice: &[u8]) -> Result { + let symbols = PdbSymbols::new(pdb_slice).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("Symbols not found") + })?; + let list = PdbStruct::new(pdb_slice, "_LIST_ENTRY").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_LIST_ENTRY not found") + })?; + let kproc = PdbStruct::new(pdb_slice, "_KPROCESS").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_KPROCESS not found") + })?; + let eproc = PdbStruct::new(pdb_slice, "_EPROCESS").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_EPROCESS not found") + })?; + let ethread = PdbStruct::new(pdb_slice, "_ETHREAD").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_ETHREAD not found") + })?; + let kthread = PdbStruct::new(pdb_slice, "_KTHREAD").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_KTHREAD not found") + })?; + let teb = PdbStruct::new(pdb_slice, "_TEB").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_TEB not found") + })?; + let mm_vad = PdbStruct::new(pdb_slice, "_MMVAD_SHORT").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_MMVAD_SHORT not found") + })?; + let mm_vad_flags = PdbStruct::new(pdb_slice, "_MMVAD_FLAGS").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_MMVAD_FLAGS not found") + })?; + + let phys_mem_block = symbols + 
.find_symbol("MmPhysicalMemoryBlock") + .or_else(|| symbols.find_symbol("_MmPhysicalMemoryBlock")) + .copied() + .unwrap_or(0); + + let list_blink = list + .find_field("Blink") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_LIST_ENTRY::Blink not found") + })? + .offset as _; + + let eproc_link = eproc + .find_field("ActiveProcessLinks") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::ActiveProcessLinks not found") + })? + .offset as _; + + let kproc_dtb = kproc + .find_field("DirectoryTableBase") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_KPROCESS::DirectoryTableBase not found") + })? + .offset as _; + let eproc_pid = eproc + .find_field("UniqueProcessId") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::UniqueProcessId not found") + })? + .offset as _; + let eproc_name = eproc + .find_field("ImageFileName") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::ImageFileName not found") + })? + .offset as _; + let eproc_peb = eproc + .find_field("Peb") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_EPROCESS::Peb not found") + })? + .offset as _; + let eproc_section_base = eproc + .find_field("SectionBaseAddress") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::SectionBaseAddress not found") + })? + .offset as _; + let eproc_exit_status = eproc + .find_field("ExitStatus") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::ExitStatus not found") + })? + .offset as _; + let eproc_thread_list = eproc + .find_field("ThreadListHead") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::ThreadListHead not found") + })? 
+ .offset as _; + + // windows 10 uses an uppercase W whereas older windows versions (windows 7) uses a lowercase w + let eproc_wow64 = match eproc + .find_field("WoW64Process") + .or_else(|| eproc.find_field("Wow64Process")) + { + Some(f) => f.offset as _, + None => 0, + }; + + // threads + let kthread_teb = kthread + .find_field("Teb") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_KTHREAD::Teb not found") + })? + .offset as _; + let ethread_list_entry = ethread + .find_field("ThreadListEntry") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_ETHREAD::ThreadListEntry not found") + })? + .offset as _; + let teb_peb = teb + .find_field("ProcessEnvironmentBlock") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_TEB::ProcessEnvironmentBlock not found") + })? + .offset as _; + let teb_peb_x86 = if let Ok(teb32) = PdbStruct::new(pdb_slice, "_TEB32").map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset).log_warn("_TEB32 not found") + }) { + teb32 + .find_field("ProcessEnvironmentBlock") + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_TEB32::ProcessEnvironmentBlock not found") + })? + .offset as _ + } else { + 0 + }; + + let eproc_vad_root = eproc + .find_field("VadRoot") // MM_AVL_TABLE *PhysicalVadRoot / MM_AVL_TABLE VadRoot / RTL_AVL_TREE VadRoot + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Offset) + .log_warn("_EPROCESS::VadRoot not found") + })? + .offset as _; + + // On older versions VadNode was inlined into the structure - LeftChild being the first + // field of a binary tree. 
+ let vad_node = mm_vad + .find_field("VadNode") + .or_else(|| mm_vad.find_field("LeftChild")) + .map(|f| f.offset) + .unwrap_or(0) as _; + + let starting_vpn = mm_vad + .find_field("StartingVpn") + .map(|f| f.offset) + .unwrap_or(0) as _; + let ending_vpn = mm_vad + .find_field("EndingVpn") + .map(|f| f.offset) + .unwrap_or(0) as _; + let starting_vpn_high = mm_vad + .find_field("StartingVpnHigh") + .map(|f| f.offset) + .unwrap_or(0) as _; + let ending_vpn_high = mm_vad + .find_field("EndingVpnHigh") + .map(|f| f.offset) + .unwrap_or(0) as _; + let u = mm_vad.find_field("u").map(|f| f.offset).unwrap_or(0) as _; + + let protection_bit = mm_vad_flags + .find_field("Protection") + .map(|f| f.bit_offset) + .unwrap_or(0) as _; + + Ok(Self(Win32OffsetTable { + list_blink, + eproc_link, + + phys_mem_block, + + kproc_dtb, + + eproc_pid, + eproc_name, + eproc_peb, + eproc_section_base, + eproc_exit_status, + eproc_thread_list, + eproc_wow64, + eproc_vad_root, + + kthread_teb, + ethread_list_entry, + teb_peb, + teb_peb_x86, + + mmvad: MmVadOffsetTable { + vad_node, + starting_vpn, + ending_vpn, + starting_vpn_high, + ending_vpn_high, + u, + protection_bit, + }, + })) + } + + /// _LIST_ENTRY::Blink offset + pub fn list_blink(&self) -> usize { + self.0.list_blink as usize + } + /// _LIST_ENTRY::Flink offset + pub fn eproc_link(&self) -> usize { + self.0.eproc_link as usize + } + + /// MmPhysicalMemoryBlock offset + pub fn phys_mem_block(&self) -> usize { + self.0.phys_mem_block as usize + } + + /// _KPROCESS::DirectoryTableBase offset + /// Exists since version 3.10 + pub fn kproc_dtb(&self) -> usize { + self.0.kproc_dtb as usize + } + /// _EPROCESS::UniqueProcessId offset + /// Exists since version 3.10 + pub fn eproc_pid(&self) -> usize { + self.0.eproc_pid as usize + } + /// _EPROCESS::ImageFileName offset + /// Exists since version 3.10 + pub fn eproc_name(&self) -> usize { + self.0.eproc_name as usize + } + /// _EPROCESS::Peb offset + /// Exists since version 5.10 + pub 
fn eproc_peb(&self) -> usize { + self.0.eproc_peb as usize + } + /// _EPROCESS::SectionBaseAddress offset + /// Exists since version 3.10 + pub fn eproc_section_base(&self) -> usize { + self.0.eproc_section_base as usize + } + /// _EPROCESS::ExitStatus offset + /// Exists since version 3.10 + pub fn eproc_exit_status(&self) -> usize { + self.0.eproc_exit_status as usize + } + /// _EPROCESS::ThreadListHead offset + /// Exists since version 5.10 + pub fn eproc_thread_list(&self) -> usize { + self.0.eproc_thread_list as usize + } + /// _EPROCESS::VadRoot offset + /// Exists since version 5.0 + pub fn eproc_wow64(&self) -> usize { + self.0.eproc_wow64 as usize + } + /// _EPROCESS::WoW64Process offset + /// Exists since version xxx + pub fn eproc_vad_root(&self) -> usize { + self.0.eproc_vad_root as usize + } + + /// _KTHREAD::Teb offset + /// Exists since version 6.2 + pub fn kthread_teb(&self) -> usize { + self.0.kthread_teb as usize + } + /// _ETHREAD::ThreadListEntry offset + /// Exists since version 6.2 + pub fn ethread_list_entry(&self) -> usize { + self.0.ethread_list_entry as usize + } + /// _TEB::ProcessEnvironmentBlock offset + /// Exists since version x.x + pub fn teb_peb(&self) -> usize { + self.0.teb_peb as usize + } + /// _TEB32::ProcessEnvironmentBlock offset + /// Exists since version x.x + pub fn teb_peb_x86(&self) -> usize { + self.0.teb_peb_x86 as usize + } + + /// _MMVAD_SHORT offsets + pub fn mm_vad(&self) -> MmVadOffsetTable { + self.0.mmvad + } + + pub fn builder<'a>() -> Win32OffsetBuilder<'a> { + Win32OffsetBuilder::default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // this test is not ideal for the CI so it's disabled for now. 
+ /* + #[test] + fn download_pdb() { + let guid = Win32Guid { + file_name: "ntkrnlmp.pdb".to_string(), + guid: "3844DBB920174967BE7AA4A2C20430FA2".to_string(), + }; + let offsets = Win32Offsets::builder() + .symbol_store(SymbolStore::new().no_cache()) + .guid(guid) + .build() + .unwrap(); + + assert_eq!(offsets.0.list_blink, 8); + assert_eq!(offsets.0.eproc_link, 392); + + assert_eq!(offsets.0.kproc_dtb, 40); + + assert_eq!(offsets.0.eproc_pid, 384); + assert_eq!(offsets.0.eproc_name, 736); + assert_eq!(offsets.0.eproc_peb, 824); + assert_eq!(offsets.0.eproc_thread_list, 776); + assert_eq!(offsets.0.eproc_wow64, 800); + + assert_eq!(offsets.0.kthread_teb, 184); + assert_eq!(offsets.0.ethread_list_entry, 1056); + assert_eq!(offsets.0.teb_peb, 96); + assert_eq!(offsets.0.teb_peb_x86, 48); + } + */ +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/offset_table.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/offset_table.rs similarity index 64% rename from apex_dma/memflow_lib/memflow-win32/src/offsets/offset_table.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/offset_table.rs index f79dfa5..dcb6a41 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/offsets/offset_table.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/offset_table.rs @@ -1,9 +1,10 @@ use std::prelude::v1::*; -use dataview::Pod; use std::convert::TryFrom; use std::str; +use memflow::dataview::Pod; + /// Describes an offset file. /// At compile time this crate will create a binary blob of all /// TOML files contained in the memflow-win32/offsets/ folder @@ -21,14 +22,22 @@ use std::str; // # Safety // This struct guarantees that it does not contain any padding. 
#[repr(C, align(4))] -#[derive(Clone, Pod)] +#[derive(Debug, Clone, Pod)] #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] pub struct Win32OffsetFile { - // Win32GUID + pub header: Win32OffsetHeader, + pub offsets: Win32OffsetTable, +} + +#[repr(C, align(4))] +#[derive(Debug, Clone, Pod)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct Win32OffsetHeader { + // Win32Guid #[cfg_attr(feature = "serde", serde(default))] - pub pdb_file_name: BinaryString, + pub pdb_file_name: BinaryString<128>, #[cfg_attr(feature = "serde", serde(default))] - pub pdb_guid: BinaryString, + pub pdb_guid: BinaryString<128>, // Win32Version pub nt_major_version: u32, @@ -37,10 +46,14 @@ pub struct Win32OffsetFile { // Architecture pub arch: Win32OffsetsArchitecture, - - pub offsets: Win32OffsetTable, } +const _: [(); std::mem::size_of::<[Win32OffsetHeader; 16]>()] = + [(); 16 * std::mem::size_of::()]; + +const _: [(); std::mem::size_of::<[Win32OffsetTable; 16]>()] = + [(); 16 * std::mem::size_of::()]; + const _: [(); std::mem::size_of::<[Win32OffsetFile; 16]>()] = [(); 16 * std::mem::size_of::()]; @@ -55,35 +68,40 @@ pub enum Win32OffsetsArchitecture { impl std::fmt::Display for Win32OffsetsArchitecture { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } unsafe impl Pod for Win32OffsetsArchitecture {} -// TODO: use const-generics here once they are fully stabilized #[derive(Clone)] -pub struct BinaryString(pub [u8; 128]); +pub struct BinaryString(pub [u8; N]); -impl Default for BinaryString { +impl std::fmt::Debug for BinaryString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", <&str>::try_from(self)) + } +} + +impl Default for BinaryString { fn default() -> Self { (&[][..]).into() } } -impl<'a> From<&'a [u8]> for BinaryString { +impl<'a, const N: usize> From<&'a [u8]> for BinaryString { fn 
from(other: &'a [u8]) -> Self { - let mut arr = [0; 128]; + let mut arr = [0; N]; arr[..other.len()].copy_from_slice(other); - Self { 0: arr } + Self(arr) } } -impl<'a> TryFrom<&'a BinaryString> for &'a str { +impl<'a, const N: usize> TryFrom<&'a BinaryString> for &'a str { type Error = std::str::Utf8Error; - fn try_from(other: &'a BinaryString) -> Result { + fn try_from(other: &'a BinaryString) -> Result { Ok(str::from_utf8(&other.0)? .split_terminator('\0') .next() @@ -91,26 +109,26 @@ impl<'a> TryFrom<&'a BinaryString> for &'a str { } } -impl<'a> From<&'a str> for BinaryString { +impl<'a, const N: usize> From<&'a str> for BinaryString { fn from(other: &'a str) -> Self { - let mut arr = [0; 128]; + let mut arr = [0; N]; arr[..other.len()].copy_from_slice(other.as_bytes()); - Self { 0: arr } + Self(arr) } } -impl From for BinaryString { +impl From for BinaryString { fn from(other: String) -> Self { Self::from(other.as_str()) } } -unsafe impl Pod for BinaryString {} +unsafe impl Pod for BinaryString {} #[cfg(feature = "serde")] -impl ::serde::Serialize for BinaryString { +impl ::serde::Serialize for BinaryString { fn serialize(&self, serializer: S) -> Result where S: ::serde::Serializer, @@ -123,15 +141,15 @@ impl ::serde::Serialize for BinaryString { } #[cfg(feature = "serde")] -impl<'de> ::serde::de::Deserialize<'de> for BinaryString { +impl<'de, const N: usize> ::serde::de::Deserialize<'de> for BinaryString { fn deserialize(deserializer: D) -> Result where D: ::serde::de::Deserializer<'de>, { - struct BinaryStringVisitor; + struct BinaryStringVisitor; - impl<'de> ::serde::de::Visitor<'de> for BinaryStringVisitor { - type Value = [u8; 128]; + impl<'de, const N: usize> ::serde::de::Visitor<'de> for BinaryStringVisitor { + type Value = [u8; N]; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { formatter.write_str("a string containing json data") @@ -143,7 +161,7 @@ impl<'de> ::serde::de::Deserialize<'de> for BinaryString { { // 
unfortunately we lose some typed information // from errors deserializing the json string - let mut result = [0u8; 128]; + let mut result = [0u8; N]; result[..v.len()].copy_from_slice(v.as_bytes()); @@ -152,18 +170,20 @@ impl<'de> ::serde::de::Deserialize<'de> for BinaryString { } // use our visitor to deserialize an `ActualValue` - let inner: [u8; 128] = deserializer.deserialize_any(BinaryStringVisitor)?; - Ok(Self { 0: inner }) + let inner: [u8; N] = deserializer.deserialize_any(BinaryStringVisitor)?; + Ok(Self(inner)) } } #[repr(C, align(4))] -#[derive(Debug, Clone, Pod)] +#[derive(Debug, Copy, Clone, Pod)] #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] pub struct Win32OffsetTable { pub list_blink: u32, pub eproc_link: u32, + pub phys_mem_block: u32, + /// Since version 3.10 pub kproc_dtb: u32, /// Since version 3.10 @@ -180,6 +200,8 @@ pub struct Win32OffsetTable { pub eproc_thread_list: u32, /// Since version 5.0 pub eproc_wow64: u32, + /// Since version xxx + pub eproc_vad_root: u32, /// Since version 6.2 pub kthread_teb: u32, @@ -189,4 +211,19 @@ pub struct Win32OffsetTable { pub teb_peb: u32, /// Since version x.x pub teb_peb_x86: u32, + + pub mmvad: MmVadOffsetTable, +} + +#[repr(C, align(4))] +#[derive(Debug, Copy, Clone, Pod)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct MmVadOffsetTable { + pub vad_node: u32, + pub starting_vpn: u32, + pub ending_vpn: u32, + pub starting_vpn_high: u32, + pub ending_vpn_high: u32, + pub u: u32, + pub protection_bit: u32, } diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb.rs similarity index 69% rename from apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb.rs index aa3bf49..0e5b5c3 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct.rs 
+++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb.rs @@ -1,5 +1,6 @@ mod data; +use std::convert::TryInto; use std::prelude::v1::*; use data::TypeSet; @@ -8,10 +9,42 @@ use std::{fmt, io, result}; use pdb::{FallibleIterator, Result, Source, SourceSlice, SourceView, TypeData, PDB}; +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PdbSymbols { + symbol_map: HashMap, +} + +impl PdbSymbols { + pub fn new(pdb_slice: &[u8]) -> Result { + let pdb_buffer = PdbSourceBuffer::new(pdb_slice); + let mut pdb = PDB::open(pdb_buffer)?; + + let symbol_table = pdb.global_symbols()?; + let address_map = pdb.address_map()?; + + let mut symbol_map = HashMap::new(); + + let mut symbols = symbol_table.iter(); + while let Some(symbol) = symbols.next()? { + if let Ok(pdb::SymbolData::Public(data)) = symbol.parse() { + let rva = data.offset.to_rva(&address_map).unwrap_or_default(); + symbol_map.insert(data.name.to_string().into(), rva.0); + } + } + + Ok(Self { symbol_map }) + } + + pub fn find_symbol(&self, name: &str) -> Option<&u32> { + self.symbol_map.get(name) + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct PdbField { pub type_name: String, pub offset: usize, + pub bit_offset: usize, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -20,7 +53,7 @@ pub struct PdbStruct { } impl PdbStruct { - pub fn with(pdb_slice: &[u8], class_name: &str) -> Result { + pub fn new(pdb_slice: &[u8], class_name: &str) -> Result { let pdb_buffer = PdbSourceBuffer::new(pdb_slice); let mut pdb = PDB::open(pdb_buffer)?; @@ -48,10 +81,7 @@ impl PdbStruct { // add all the needed types iteratively until we're done loop { // get the last element in needed_types without holding an immutable borrow - let last = match needed_types.iter().next_back() { - Some(n) => Some(*n), - None => None, - }; + let last = needed_types.iter().next_back().copied(); if let Some(type_index) = last { // remove it @@ -71,7 +101,8 @@ impl PdbStruct { f.name.to_string().into_owned(), PdbField { type_name: 
f.type_name.clone(), - offset: f.offset as usize, + offset: f.offset as usize, // u16 can always be safely converted into usize + bit_offset: f.bit_offset as usize, // u8 can always be safely converted into usize }, ); }); @@ -106,7 +137,7 @@ impl<'a, 's> Source<'s> for PdbSourceBuffer<'a> { &mut self, slices: &[SourceSlice], ) -> result::Result>, io::Error> { - let len = slices.iter().fold(0 as usize, |acc, s| acc + s.size); + let len = slices.iter().fold(0_usize, |acc, s| acc + s.size); let mut v = PdbSourceBufferView { bytes: Vec::with_capacity(len), @@ -116,9 +147,9 @@ impl<'a, 's> Source<'s> for PdbSourceBuffer<'a> { let bytes = v.bytes.as_mut_slice(); let mut output_offset: usize = 0; for slice in slices { - bytes[output_offset..(output_offset + slice.size)].copy_from_slice( - &self.bytes[slice.offset as usize..(slice.offset as usize + slice.size)], - ); + let offset = slice.offset.try_into().unwrap(); + bytes[output_offset..(output_offset + slice.size)] + .copy_from_slice(&self.bytes[offset..(offset + slice.size)]); output_offset += slice.size; } diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct/data.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb/data.rs similarity index 94% rename from apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct/data.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb/data.rs index 98b7827..c87da64 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/offsets/pdb_struct/data.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/pdb/data.rs @@ -7,8 +7,8 @@ use std::collections::BTreeSet; pub type TypeSet = BTreeSet; -pub fn type_name<'p>( - type_finder: &pdb::TypeFinder<'p>, +pub fn type_name( + type_finder: &pdb::TypeFinder<'_>, type_index: pdb::TypeIndex, needed_types: &mut TypeSet, ) -> pdb::Result { @@ -83,12 +83,12 @@ pub fn type_name<'p>( pdb::TypeData::Array(data) => { let mut name = type_name(type_finder, 
data.element_type, needed_types)?; for size in data.dimensions { - name = format!("{}[{}]", name, size); + name = format!("{name}[{size}]"); } name } - _ => format!("Type{} /* TODO: figure out how to name it */", type_index), + x => format!("Type{type_index} /* TODO: figure out how to name it {x:?} */"), }; // TODO: search and replace std:: patterns @@ -110,14 +110,8 @@ pub struct Class<'p> { } impl<'p> Class<'p> { - fn add_derived_from( - &mut self, - _: &pdb::TypeFinder<'p>, - _: pdb::TypeIndex, - _: &mut TypeSet, - ) -> pdb::Result<()> { + fn add_derived_from(&mut self, _: &pdb::TypeFinder<'p>, _: pdb::TypeIndex, _: &mut TypeSet) { // TODO - Ok(()) } fn add_fields( @@ -158,10 +152,17 @@ impl<'p> Class<'p> { match *field { pdb::TypeData::Member(ref data) => { // TODO: attributes (static, virtual, etc.) + + let bit_offset = match type_finder.find(data.field_type)?.parse()? { + pdb::TypeData::Bitfield(bitfield) => bitfield.position, + _ => 0, + }; + self.fields.push(Field { type_name: type_name(type_finder, data.field_type, needed_types)?, name: data.name, offset: data.offset, + bit_offset, }); } @@ -246,7 +247,8 @@ pub struct BaseClass { pub struct Field<'p> { pub type_name: String, pub name: pdb::RawString<'p>, - pub offset: u16, + pub offset: u64, + pub bit_offset: u8, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -281,8 +283,8 @@ impl<'p> Method<'p> { } } -fn argument_list<'p>( - type_finder: &pdb::TypeFinder<'p>, +fn argument_list( + type_finder: &pdb::TypeFinder<'_>, type_index: pdb::TypeIndex, needed_types: &mut TypeSet, ) -> pdb::Result> { @@ -317,7 +319,7 @@ impl<'p> Enum<'p> { match type_finder.find(type_index)?.parse()? 
{ pdb::TypeData::FieldList(data) => { for field in &data.fields { - self.add_field(type_finder, field, needed_types)?; + self.add_field(type_finder, field, needed_types); } if let Some(continuation) = data.continuation { @@ -337,12 +339,7 @@ impl<'p> Enum<'p> { Ok(()) } - fn add_field( - &mut self, - _: &pdb::TypeFinder<'p>, - field: &pdb::TypeData<'p>, - _: &mut TypeSet, - ) -> pdb::Result<()> { + fn add_field(&mut self, _: &pdb::TypeFinder<'p>, field: &pdb::TypeData<'p>, _: &mut TypeSet) { // ignore everything else even though that's sad if let pdb::TypeData::Enumerate(ref data) = field { self.values.push(EnumValue { @@ -350,8 +347,6 @@ impl<'p> Enum<'p> { value: data.value, }); } - - Ok(()) } } @@ -410,7 +405,7 @@ impl<'p> Data<'p> { }; if let Some(derived_from) = data.derived_from { - class.add_derived_from(type_finder, derived_from, needed_types)?; + class.add_derived_from(type_finder, derived_from, needed_types); } if let Some(fields) = data.fields { diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/symstore.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/symstore.rs similarity index 60% rename from apex_dma/memflow_lib/memflow-win32/src/offsets/symstore.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/symstore.rs index d305a12..009f6f0 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/offsets/symstore.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32-defs/src/offsets/symstore.rs @@ -1,18 +1,19 @@ use std::prelude::v1::*; -use crate::error::{Error, Result}; -use crate::offsets::Win32GUID; +use crate::offsets::Win32Guid; use std::fs::{self, File}; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; -use dirs::home_dir; +use dirs::cache_dir; use log::info; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; + #[cfg(feature = "download_progress")] use { - pbr::ProgressBar, + indicatif::{ProgressBar, ProgressStyle}, progress_streams::ProgressReader, 
std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}, std::sync::Arc, @@ -26,7 +27,11 @@ fn read_to_end(reader: &mut T, len: usize) -> Result> { let mut reader = ProgressReader::new(reader, |progress: usize| { total.fetch_add(progress, Ordering::SeqCst); }); - let mut pb = ProgressBar::new(len as u64); + let pb = ProgressBar::new(len as u64); + pb.set_style(ProgressStyle::default_bar() + .template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})") + .unwrap() + .progress_chars("#>-")); let finished = Arc::new(AtomicBool::new(false)); let thread = { @@ -35,16 +40,16 @@ fn read_to_end(reader: &mut T, len: usize) -> Result> { std::thread::spawn(move || { while !finished_thread.load(Ordering::Relaxed) { - pb.set(total_thread.load(Ordering::SeqCst) as u64); + pb.set_position(total_thread.load(Ordering::SeqCst) as u64); std::thread::sleep(std::time::Duration::from_millis(10)); } - pb.finish(); + pb.finish_with_message("downloaded"); }) }; - reader - .read_to_end(&mut buffer) - .map_err(|_| Error::SymbolStore("unable to read from http request"))?; + reader.read_to_end(&mut buffer).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Http).log_error("unable to read from http request") + })?; finished.store(true, Ordering::Relaxed); thread.join().unwrap(); @@ -54,7 +59,9 @@ fn read_to_end(reader: &mut T, len: usize) -> Result> { #[cfg(not(feature = "download_progress"))] fn read_to_end(reader: &mut T, _len: usize) -> Result> { let mut buffer = vec![]; - reader.read_to_end(&mut buffer)?; + reader.read_to_end(&mut buffer).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Http).log_error("unable to read from http request") + })?; Ok(buffer) } @@ -66,10 +73,10 @@ pub struct SymbolStore { impl Default for SymbolStore { fn default() -> Self { - let home_dir = home_dir().expect("unable to get home directory"); + let cache_dir = cache_dir().expect("unable to get cache directory"); Self { base_url: 
"https://msdl.microsoft.com/download/symbols".to_string(), - cache_path: Some(home_dir.join(".memflow").join("cache")), + cache_path: Some(cache_dir.join("memflow")), } } } @@ -79,7 +86,7 @@ impl SymbolStore { Self::default() } - pub fn load(&self, guid: &Win32GUID) -> Result> { + pub fn load(&self, guid: &Win32Guid) -> Result> { if let Some(cache_path) = &self.cache_path { let cache_dir = cache_path.join(guid.file_name.clone()); let cache_file = cache_dir.join(guid.guid.clone()); @@ -89,11 +96,15 @@ impl SymbolStore { "reading pdb from local cache: {}", cache_file.to_string_lossy() ); - let mut file = File::open(cache_file) - .map_err(|_| Error::SymbolStore("unable to open pdb in local cache"))?; + let mut file = File::open(cache_file).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::UnableToReadFile) + .log_error("unable to open pdb in local cache") + })?; let mut buffer = Vec::new(); - file.read_to_end(&mut buffer) - .map_err(|_| Error::SymbolStore("unable to read pdb from local cache"))?; + file.read_to_end(&mut buffer).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::UnableToReadFile) + .log_error("unable to read pdb from local cache") + })?; buffer } else { let buffer = self.download(guid)?; @@ -101,7 +112,8 @@ impl SymbolStore { if !cache_dir.exists() { info!("creating cache directory {:?}", cache_dir.to_str()); fs::create_dir_all(&cache_dir).map_err(|_| { - Error::SymbolStore("unable to create folder in local pdb cache") + Error(ErrorOrigin::OsLayer, ErrorKind::UnableToCreateDirectory) + .log_error("unable to create folder in local pdb cache") })?; } @@ -109,10 +121,14 @@ impl SymbolStore { "writing pdb to local cache: {}", cache_file.to_string_lossy() ); - let mut file = File::create(cache_file) - .map_err(|_| Error::SymbolStore("unable to create file in local pdb cache"))?; - file.write_all(&buffer[..]) - .map_err(|_| Error::SymbolStore("unable to write pdb to local cache"))?; + let mut file = File::create(cache_file).map_err(|_| { + 
Error(ErrorOrigin::OsLayer, ErrorKind::UnableToWriteFile) + .log_error("unable to create file in local pdb cache") + })?; + file.write_all(&buffer[..]).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::UnableToWriteFile) + .log_error("unable to write pdb to local cache") + })?; buffer }; @@ -123,7 +139,7 @@ impl SymbolStore { } } - fn download(&self, guid: &Win32GUID) -> Result> { + fn download(&self, guid: &Win32Guid) -> Result> { let pdb_url = format!("{}/{}/{}", self.base_url, guid.file_name, guid.guid); self.download_file(&format!("{}/{}", pdb_url, guid.file_name)) @@ -132,10 +148,9 @@ impl SymbolStore { fn download_file(&self, url: &str) -> Result> { info!("downloading pdb from {}", url); - let resp = ureq::get(url).call(); - if !resp.ok() { - return Err(Error::SymbolStore("unable to download pdb")); - } + let resp = ureq::get(url).call().map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Http).log_error("unable to download pdb") + })?; assert!(resp.has("Content-Length")); let len = resp diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/Cargo.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/Cargo.toml new file mode 100644 index 0000000..e65d8ad --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "memflow-win32" +version = "0.2.0" +authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] +edition = "2021" +rust-version = "1.65" +description = "win32 integration of the memflow physical memory introspection framework" +documentation = "https://docs.rs/memflow-win32" +readme = "../README.md" +homepage = "https://memflow.io" +repository = "https://github.com/memflow/memflow-win32" +license = "MIT" +keywords = [ "memflow", "introspection", "memory", "dma" ] +categories = [ "api-bindings", "memory-management", "os" ] + +[badges] +maintenance = { status = "actively-developed" } +codecov = { repository = "github", branch = "master", service = "github" } + +[lib] +crate-type = 
["lib", "cdylib"] + +[dependencies] +memflow = { version = "0.2", default-features = false } +log = { version = "^0.4.14", default-features = false } +pelite = { version = "^0.10.0", default-features = false } +widestring = { version = "^1.0.2", default-features = false, features = ["alloc"] } +no-std-compat = { version = "^0.4.1", features = ["alloc"] } +serde = { version = "^1.0.133", default-features = false, optional = true, features = ["derive"] } +memflow-win32-defs = { version = "0.2", path = "../memflow-win32-defs", default-features = false } + +# will be replaced by our own signature scanner +regex = { version = "^1.5.0", optional = true } + +[dev_dependencies] +simplelog = "^0.12.0" +rand = "^0.8.4" +rand_xorshift = "^0.3.0" +clap = { version = "^4.0.26", features = ["cargo"] } +toml = "^0.7.3" + +[build_dependencies] +toml = "^0.7.3" +serde = { version = "^1.0.133", default-features = false, features = ["derive", "alloc"] } +memflow = { version = "0.2", default-features = false } +memflow-win32-defs = { version = "0.2", path = "../memflow-win32-defs", features = ["symstore"] } + +[features] +default = ["std", "serde_derive", "embed_offsets", "symstore", "download_progress", "regex", "memflow/default"] +std = ["no-std-compat/std", "memflow/std", "pelite/std"] +plugins = ["memflow/plugins"] +embed_offsets = ["serde", "memflow/serde_derive", "memflow-win32-defs/serde"] +serde_derive = ["serde", "memflow/serde_derive", "pelite/std", "pelite/serde", "memflow-win32-defs/serde"] +symstore = ["memflow-win32-defs/symstore"] +download_progress = ["memflow-win32-defs/download_progress"] + +[[example]] +name = "dump_offsets" +path = "examples/dump_offsets.rs" +required-features = ["memflow/serde_derive"] + +[[example]] +name = "open_process" +path = "examples/open_process.rs" +required-features = ["memflow/plugins"] + +[[example]] +name = "process_list" +path = "examples/process_list.rs" +required-features = ["memflow/plugins"] diff --git 
a/apex_dma/memflow_lib/memflow-win32/memflow-win32/build.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/build.rs new file mode 100644 index 0000000..951f181 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/build.rs @@ -0,0 +1,168 @@ +use std::{ + env, + error::Error, + fs::{self, File}, + io::{Read, Write}, + path::Path, +}; + +use memflow::dataview::PodMethods; +use std::convert::TryInto; + +#[cfg(feature = "embed_offsets")] +use memflow_win32_defs::{ + kernel::*, + offsets::{ + offset_table::{Win32OffsetFile, Win32OffsetHeader}, + *, + }, +}; + +#[repr(C, align(4))] +#[derive(Debug, ::serde::Serialize, ::serde::Deserialize)] +#[cfg(feature = "embed_offsets")] +pub struct Win32OffsetFileStart { + header: Win32OffsetHeader, +} + +#[cfg(feature = "embed_offsets")] +fn embed_offsets() -> Result<(), Box> { + let regenerate_offsets = + match &*env::var("MEMFLOW_WIN32_REGENERATE_OFFSETS").unwrap_or_default() { + "on" | "1" => 1, + "force" => 2, + _ => 0, + }; + + let out_dir = env::var("OUT_DIR")?; + let dest_path = Path::new(&out_dir).join("win32_offsets.bin"); + let mut all_the_files = File::create(dest_path)?; + + // iterate offsets folder + for f in fs::read_dir("./offsets")? { + let f = f?; + + if !f.file_type()?.is_file() { + continue; + } + + let fp = f.path(); + + let mut file = File::open(&fp)?; + let mut tomlstr = String::new(); + file.read_to_string(&mut tomlstr)?; + + std::mem::drop(file); + + let offsets = match toml::from_str::(&tomlstr) { + x if regenerate_offsets == 2 || (regenerate_offsets == 1 && x.is_err()) => { + let regenerate = || -> Result> { + let mut header = match toml::from_str::(&tomlstr) { + Ok(header) => header, + _ => { + // Parse header from the filename. 
Example format: + // 10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml + let fname = fp.file_name().and_then(|s| s.to_str()).unwrap_or_default(); + + let parts = fname.split_terminator('_').collect::>(); + + if parts.len() != 5 { + return Err(format!( + "Cannot regenerate offsets for {fname} - invalid filename format" + ) + .into()); + } + + let nt_major_version = str::parse(parts[0])?; + let nt_minor_version = str::parse(parts[1])?; + let nt_build_number = str::parse(parts[2])?; + let pdb_guid = parts[4]; + + if !pdb_guid.ends_with(".toml") { + return Err(format!("{fname} does not contain valid guid ({pdb_guid})").into()); + } + + Win32OffsetFileStart { + header: Win32OffsetHeader { + pdb_file_name: "".into(), + pdb_guid: pdb_guid.trim_end_matches(".toml").into(), + arch: toml::from_str(&format!("\"{}\"", parts[3]))?, + nt_major_version, + nt_minor_version, + nt_build_number, + }, + } + } + }.header; + + println!("{header:?}"); + + // Use pdb file name if available, backup if only file name is known. + + let file_names1: [&str; 1] = [(&header.pdb_file_name).try_into()?]; + + let file_names2 = ["ntkrnlmp.pdb", "ntkrpamp.pdb"]; + + let file_names = if file_names1[0].is_empty() { + &file_names2[..] + } else { + &file_names1[..] + }; + + let guid = (&header.pdb_guid).try_into()?; + + let (offsets, pdb_file_name) = file_names + .iter() + .map(|f| { + Win32Offsets::builder() + .symbol_store(SymbolStore::new()) + .guid(Win32Guid::new(f, guid)) + .build() + .map(|v| (v.0, f)) + }) + .find_map(Result::ok) + .ok_or("Failed to get symbols from the symbol store")?; + + header.pdb_file_name = (*pdb_file_name).into(); + + println!("{header:?}"); + println!("{offsets:?}"); + + Ok(Win32OffsetFile { header, offsets }) + }; + + match regenerate() { + Ok(x) => { + let mut file = File::create(&fp)?; + file.write_all(toml::to_string(&x)?.as_bytes())?; + + x + } + Err(e) => { + // Do not report failure if forced, but already contains correct configuration + if x.is_ok() { + x? 
+ } else { + return Err(e); + } + } + } + } + x => x?, + }; + + all_the_files.write_all(offsets.as_bytes())?; + } + + Ok(()) +} + +#[cfg(not(feature = "embed_offsets"))] +fn embed_offsets() -> Result<(), Box> { + Ok(()) +} + +fn main() -> Result<(), Box> { + embed_offsets()?; + Ok(()) +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/dump_offsets.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/dump_offsets.rs new file mode 100644 index 0000000..54a8d24 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/dump_offsets.rs @@ -0,0 +1,120 @@ +/*! +This example shows how to use a dynamically loaded connector in conjunction +with memflow-win32. This example uses the `Inventory` feature of memflow +but hard-wires the connector instance into the memflow-win32 OS layer. + +The example then dumps all the found offsets into the specified `output` file. + +# Usage: +```bash +cargo run --release --example dump_offsets -- -vv -c kvm --output file.toml +``` +*/ +use std::fs::File; +use std::io::Write; + +use clap::*; +use log::{error, Level}; + +use memflow::prelude::v1::{Result, *}; +use memflow_win32::prelude::v1::*; + +pub fn main() -> Result<()> { + let matches = parse_args(); + let (chain, output) = extract_args(&matches)?; + + // create inventory + connector + let inventory = Inventory::scan(); + let connector = inventory.builder().connector_chain(chain).build()?; + + let os = Win32Kernel::builder(connector) + .build_default_caches() + .build() + .unwrap(); + + let winver = os.kernel_info.kernel_winver; + + if winver != (0, 0).into() { + let guid = os.kernel_info.kernel_guid.unwrap_or_default(); + let offsets = Win32OffsetFile { + header: Win32OffsetHeader { + pdb_file_name: guid.file_name.as_str().into(), + pdb_guid: guid.guid.as_str().into(), + + arch: os.kernel_info.os_info.arch.into(), + + nt_major_version: winver.major_version(), + nt_minor_version: winver.minor_version(), + nt_build_number: 
winver.build_number(), + }, + offsets: os.offsets.into(), + }; + + // write offsets to file + let offsetstr = toml::to_string_pretty(&offsets).unwrap(); + match output { + Some(output) => { + let mut file = File::create(output).unwrap(); + file.write_all(offsetstr.as_bytes()).unwrap(); + } + None => println!("{offsetstr}"), + } + } else { + error!("kernel version has to be valid in order to generate a offsets file"); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("dump_offsets example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .short('c') + .action(ArgAction::Append) + .required(true), + ) + .arg(Arg::new("os").short('o').action(ArgAction::Append)) + .arg(Arg::new("output").long("output").action(ArgAction::Set)) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(ConnectorChain<'_>, Option<&str>)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + ConnectorChain::new(conn_iter, os_iter)?, + matches.get_one::("output").map(String::as_str), + )) +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/open_process.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/open_process.rs new file mode 100644 index 0000000..b46f5d3 --- /dev/null +++ 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/open_process.rs @@ -0,0 +1,107 @@ +/*! +This example shows how to use a dynamically loaded connector in conjunction +with memflow-win32. This example uses the `Inventory` feature of memflow +but hard-wires the connector instance into the memflow-win32 OS layer. + +The example showcases how to retrieve extended process info data, +opening the process and getting a list of all modules. + +# Usage: +```bash +cargo run --release --example open_process -- -vv -c kvm -p "explorer.exe" +``` +*/ +use clap::*; +use log::{info, Level}; + +use memflow::prelude::v1::*; +use memflow_win32::prelude::v1::*; + +pub fn main() -> Result<()> { + let matches = parse_args(); + let (chain, process_name) = extract_args(&matches)?; + let process_name = process_name.unwrap_or("explorer.exe"); + + // create inventory + connector + let inventory = Inventory::scan(); + let connector = inventory.builder().connector_chain(chain).build()?; + + let mut os = Win32Kernel::builder(connector) + .build_default_caches() + .build() + .expect("unable to initialize memflow-win32"); + + // display the extended process info for the process + let process_info = os.process_info_by_name(process_name)?; + let process_info_ext = os.process_info_from_base_info(process_info.clone())?; + info!("{:?}", process_info_ext); + + // create a new process instance + let mut process = os + .into_process_by_info(process_info) + .expect("unable to open process"); + + // retrieve all modules + let module_list = process.module_list().expect("unable to read module list"); + + info!("{:>5} {:>10} {:^32} {:<}", "ADDR", "BASE", "NAME", "PATH"); + + for m in module_list { + info!("{:>5} {:^16} {:^32} {}", m.address, m.base, m.name, m.path); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("dump_offsets example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + 
Arg::new("connector") + .short('c') + .action(ArgAction::Append) + .required(true), + ) + .arg(Arg::new("os").short('o').action(ArgAction::Append)) + .arg(Arg::new("process").short('p').action(ArgAction::Set)) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(ConnectorChain<'_>, Option<&str>)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + ConnectorChain::new(conn_iter, os_iter)?, + matches.get_one::("process").map(String::as_str), + )) +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/process_list.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/process_list.rs new file mode 100644 index 0000000..6ec24cb --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/examples/process_list.rs @@ -0,0 +1,97 @@ +/*! +This example shows how to use a dynamically loaded connector in conjunction +with memflow-win32. This example uses the `Inventory` feature of memflow +but hard-wires the connector instance into the memflow-win32 OS layer. 
+ +The example is an adaption of the memflow core process list example: +https://github.com/memflow/memflow/blob/next/memflow/examples/process_list.rs + +# Usage: +```bash +cargo run --release --example process_list -- -vv -c kvm +``` +*/ +use clap::*; +use log::{info, Level}; + +use memflow::prelude::v1::*; +use memflow_win32::prelude::v1::*; + +pub fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + connector + let inventory = Inventory::scan(); + let connector = inventory.builder().connector_chain(chain).build()?; + + let mut os = Win32Kernel::builder(connector) + .build_default_caches() + .build() + .unwrap(); + + let process_list = os.process_info_list().expect("unable to read process list"); + + info!( + "{:>5} {:>10} {:>10} {:<}", + "PID", "SYS ARCH", "PROC ARCH", "NAME" + ); + + for p in process_list { + info!( + "{:>5} {:^10} {:^10} {}", + p.pid, p.sys_arch, p.proc_arch, p.name + ); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("process_list example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .short('c') + .action(ArgAction::Append) + .required(true), + ) + .arg(Arg::new("os").short('o').action(ArgAction::Append)) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + 
.zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + ConnectorChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml similarity index 55% rename from apex_dma/memflow_lib/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml index 3fe28d5..49fa06d 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_18362_X64_0AFB69F5FD264D54673570E37B38A3181.toml @@ -1,13 +1,15 @@ -pdb_file_name = 'ntkrnlmp.pdb' -pdb_guid = '0AFB69F5FD264D54673570E37B38A3181' +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "0AFB69F5FD264D54673570E37B38A3181" nt_major_version = 10 nt_minor_version = 0 nt_build_number = 18362 -arch = 'X64' +arch = "X64" [offsets] list_blink = 8 eproc_link = 752 +phys_mem_block = 5719112 kproc_dtb = 40 eproc_pid = 744 eproc_name = 1104 @@ -16,7 +18,17 @@ eproc_section_base = 968 eproc_exit_status = 1620 eproc_thread_list = 1160 eproc_wow64 = 1064 +eproc_vad_root = 1624 kthread_teb = 240 ethread_list_entry = 1720 teb_peb = 96 teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml new file mode 100644 index 0000000..b69ba1d --- /dev/null +++ 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml @@ -0,0 +1,34 @@ +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "1C9875F76C8F0FBF3EB9A9D7C1C274061" +nt_major_version = 10 +nt_minor_version = 0 +nt_build_number = 19041 +arch = "X64" + +[offsets] +list_blink = 8 +eproc_link = 1096 +phys_mem_block = 13612224 +kproc_dtb = 40 +eproc_pid = 1088 +eproc_name = 1448 +eproc_peb = 1360 +eproc_section_base = 1312 +eproc_exit_status = 2004 +eproc_thread_list = 1504 +eproc_wow64 = 1408 +eproc_vad_root = 2008 +kthread_teb = 240 +ethread_list_entry = 1256 +teb_peb = 96 +teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_9C00B19DBDE003DBFE4AB4216993C8431.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_9C00B19DBDE003DBFE4AB4216993C8431.toml new file mode 100644 index 0000000..98424e3 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_9C00B19DBDE003DBFE4AB4216993C8431.toml @@ -0,0 +1,34 @@ +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "9C00B19DBDE003DBFE4AB4216993C8431" +nt_major_version = 10 +nt_minor_version = 0 +nt_build_number = 19041 +arch = "X64" + +[offsets] +list_blink = 8 +eproc_link = 1096 +phys_mem_block = 13616296 +kproc_dtb = 40 +eproc_pid = 1088 +eproc_name = 1448 +eproc_peb = 1360 +eproc_section_base = 1312 +eproc_exit_status = 2004 +eproc_thread_list = 1504 +eproc_wow64 = 1408 +eproc_vad_root = 2008 +kthread_teb = 240 +ethread_list_entry = 1256 +teb_peb = 96 +teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml similarity index 55% rename from apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml index e66d218..e8cbbdd 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X64_BBED7C2955FBE4522AAA23F4B8677AD91.toml @@ -1,13 +1,15 @@ -pdb_file_name = 'ntkrnlmp.pdb' -pdb_guid = 'BBED7C2955FBE4522AAA23F4B8677AD91' +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "BBED7C2955FBE4522AAA23F4B8677AD91" nt_major_version = 10 nt_minor_version = 0 nt_build_number = 19041 -arch = 'X64' +arch = "X64" [offsets] list_blink = 8 eproc_link = 1096 +phys_mem_block = 13612224 kproc_dtb = 40 eproc_pid = 1088 eproc_name = 1448 @@ -16,7 +18,17 @@ eproc_section_base = 1312 eproc_exit_status = 2004 eproc_thread_list = 1504 eproc_wow64 = 1408 +eproc_vad_root = 2008 kthread_teb = 240 ethread_list_entry = 1256 teb_peb = 96 teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml similarity index 54% rename from apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml index 6bec991..033c105 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml +++ 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19041_X86_1B1D6AA205E1C87DC63A314ACAA50B491.toml @@ -1,13 +1,15 @@ -pdb_file_name = 'ntkrpamp.pdb' -pdb_guid = '1B1D6AA205E1C87DC63A314ACAA50B491' +[header] +pdb_file_name = "ntkrpamp.pdb" +pdb_guid = "1B1D6AA205E1C87DC63A314ACAA50B491" nt_major_version = 10 nt_minor_version = 0 nt_build_number = 19041 -arch = 'X86' +arch = "X86" [offsets] list_blink = 4 eproc_link = 232 +phys_mem_block = 3225556 kproc_dtb = 24 eproc_pid = 228 eproc_name = 428 @@ -16,7 +18,17 @@ eproc_section_base = 352 eproc_exit_status = 844 eproc_thread_list = 464 eproc_wow64 = 0 +eproc_vad_root = 848 kthread_teb = 168 ethread_list_entry = 740 teb_peb = 48 teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 12 +ending_vpn = 16 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 28 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19045_X64_5F0CF5D532F385333A9B4ABA25CA65961.toml similarity index 58% rename from apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19045_X64_5F0CF5D532F385333A9B4ABA25CA65961.toml index ef02c48..e4b815b 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/10_0_19041_X64_1C9875F76C8F0FBF3EB9A9D7C1C274061.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_19045_X64_5F0CF5D532F385333A9B4ABA25CA65961.toml @@ -1,13 +1,15 @@ +[header] pdb_file_name = 'ntkrnlmp.pdb' -pdb_guid = '1C9875F76C8F0FBF3EB9A9D7C1C274061' +pdb_guid = '5F0CF5D532F385333A9B4ABA25CA65961' nt_major_version = 10 nt_minor_version = 0 -nt_build_number = 19041 +nt_build_number = 19045 arch = 'X64' [offsets] list_blink = 8 eproc_link = 1096 +phys_mem_block = 13612216 kproc_dtb = 40 eproc_pid = 1088 eproc_name = 1448 @@ -16,7 +18,17 @@ eproc_section_base 
= 1312 eproc_exit_status = 2004 eproc_thread_list = 1504 eproc_wow64 = 1408 +eproc_vad_root = 2008 kthread_teb = 240 ethread_list_entry = 1256 teb_peb = 96 teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_22000_X86_55678BC384F099B6ED05E9E39046924A1.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_22000_X86_55678BC384F099B6ED05E9E39046924A1.toml new file mode 100644 index 0000000..e9c1373 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/10_0_22000_X86_55678BC384F099B6ED05E9E39046924A1.toml @@ -0,0 +1,34 @@ +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "55678BC384F099B6ED05E9E39046924A1" +nt_major_version = 10 +nt_minor_version = 0 +nt_build_number = 22000 +arch = "X86" + +[offsets] +list_blink = 8 +eproc_link = 1096 +phys_mem_block = 13658416 +kproc_dtb = 40 +eproc_pid = 1088 +eproc_name = 1448 +eproc_peb = 1360 +eproc_section_base = 1312 +eproc_exit_status = 2004 +eproc_thread_list = 1504 +eproc_wow64 = 1408 +eproc_vad_root = 2008 +kthread_teb = 240 +ethread_list_entry = 1336 +teb_peb = 96 +teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 0 +starting_vpn = 24 +ending_vpn = 28 +starting_vpn_high = 32 +ending_vpn_high = 33 +u = 48 +protection_bit = 7 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/3_10_511_X86.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/3_10_511_X86.toml new file mode 100644 index 0000000..dad6275 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/3_10_511_X86.toml @@ -0,0 +1,35 @@ +[header] +nt_major_version = 3 +nt_minor_version = 10 +nt_build_number = 511 +arch = 'X86' + +[offsets] +phys_mem_block = 0 + +list_blink = 4 +eproc_link = 0xb4 + +kproc_dtb = 0x38 +eproc_pid = 0xb0 +eproc_name = 0x228 +eproc_peb = 0x1c0 +eproc_section_base = 0x1c4 
+eproc_exit_status = 0 #5.1+ +eproc_thread_list = 0 #5.1+ +eproc_wow64 = 0 #5.0+ +eproc_vad_root = 0x01a4 #3.10+ + +kthread_teb = 0 #6.2+ +ethread_list_entry = 0x0 #5.0+ +teb_peb = 0 #? +teb_peb_x86 = 0 #? + +[offsets.mmvad] +vad_node = 12 +starting_vpn = 0 +ending_vpn = 4 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 20 +protection_bit = 0 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/4_0_1381_X86.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/4_0_1381_X86.toml similarity index 67% rename from apex_dma/memflow_lib/memflow-win32/offsets/4_0_1381_X86.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/4_0_1381_X86.toml index 3c98e57..0e45b2b 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/4_0_1381_X86.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/4_0_1381_X86.toml @@ -1,9 +1,12 @@ +[header] nt_major_version = 4 nt_minor_version = 0 nt_build_number = 1381 arch = 'X86' [offsets] +phys_mem_block = 0 + list_blink = 4 eproc_link = 0x98 @@ -15,9 +18,18 @@ eproc_section_base = 0x190 eproc_exit_status = 0 #5.1+ eproc_thread_list = 0 #5.1+ eproc_wow64 = 0 #5.0+ +eproc_vad_root = 0x0170 #3.10+ kthread_teb = 0 #6.2+ ethread_list_entry = 0x0 #5.0+ teb_peb = 0 #? teb_peb_x86 = 0 #? 
+[offsets.mmvad] +vad_node = 12 +starting_vpn = 0 +ending_vpn = 4 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 20 +protection_bit = 0 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml new file mode 100644 index 0000000..688d37d --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml @@ -0,0 +1,34 @@ +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "82DCF67A38274C9CA99B60B421D2786D2" +nt_major_version = 5 +nt_minor_version = 2 +nt_build_number = 3790 +arch = "X64" + +[offsets] +list_blink = 8 +eproc_link = 224 +phys_mem_block = 1921104 +kproc_dtb = 40 +eproc_pid = 216 +eproc_name = 616 +eproc_peb = 704 +eproc_section_base = 504 +eproc_exit_status = 908 +eproc_thread_list = 656 +eproc_wow64 = 680 +eproc_vad_root = 920 +kthread_teb = 176 +ethread_list_entry = 976 +teb_peb = 96 +teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 8 +starting_vpn = 24 +ending_vpn = 32 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 40 +protection_bit = 56 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml similarity index 54% rename from apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml index 322aa81..cb6f1c6 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X64_ECE191A20CFF4465AE46DF96C22638451.toml @@ -1,13 +1,15 @@ -pdb_file_name = 'ntkrnlmp.pdb' -pdb_guid = 
'ECE191A20CFF4465AE46DF96C22638451' +[header] +pdb_file_name = "ntkrnlmp.pdb" +pdb_guid = "ECE191A20CFF4465AE46DF96C22638451" nt_major_version = 6 nt_minor_version = 1 nt_build_number = 7601 -arch = 'X64' +arch = "X64" [offsets] list_blink = 8 eproc_link = 392 +phys_mem_block = 2740280 kproc_dtb = 40 eproc_pid = 384 eproc_name = 736 @@ -16,7 +18,17 @@ eproc_section_base = 624 eproc_exit_status = 1092 eproc_thread_list = 776 eproc_wow64 = 800 +eproc_vad_root = 1096 kthread_teb = 184 ethread_list_entry = 1064 teb_peb = 96 teb_peb_x86 = 48 + +[offsets.mmvad] +vad_node = 8 +starting_vpn = 24 +ending_vpn = 32 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 40 +protection_bit = 56 diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml similarity index 54% rename from apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml index 8260bd7..644cb75 100644 --- a/apex_dma/memflow_lib/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/offsets/6_1_7601_X86_684DA42A30CC450F81C535B4D18944B12.toml @@ -1,13 +1,15 @@ -pdb_file_name = 'ntkrpamp.pdb' -pdb_guid = '684DA42A30CC450F81C535B4D18944B12' +[header] +pdb_file_name = "ntkrpamp.pdb" +pdb_guid = "684DA42A30CC450F81C535B4D18944B12" nt_major_version = 6 nt_minor_version = 1 nt_build_number = 7601 -arch = 'X86' +arch = "X86" [offsets] list_blink = 4 eproc_link = 184 +phys_mem_block = 1484392 kproc_dtb = 24 eproc_pid = 180 eproc_name = 364 @@ -16,7 +18,17 @@ eproc_section_base = 300 eproc_exit_status = 628 eproc_thread_list = 392 eproc_wow64 = 0 +eproc_vad_root = 632 kthread_teb = 136 ethread_list_entry = 616 teb_peb = 48 teb_peb_x86 = 48 + 
+[offsets.mmvad] +vad_node = 4 +starting_vpn = 12 +ending_vpn = 16 +starting_vpn_high = 0 +ending_vpn_high = 0 +u = 20 +protection_bit = 24 diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/mod.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/mod.rs new file mode 100644 index 0000000..542d83f --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/mod.rs @@ -0,0 +1,6 @@ +pub mod ntos; +pub mod start_block; +pub mod sysproc; + +pub use memflow_win32_defs::kernel::*; +pub use start_block::StartBlock; diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos.rs similarity index 52% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/ntos.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos.rs index 5c2996e..0941d5e 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos.rs @@ -3,24 +3,26 @@ pub(crate) mod pehelper; mod x64; mod x86; -use super::{StartBlock, Win32GUID, Win32Version}; -use crate::error::{Error, PartialResultExt, Result}; +use super::{StartBlock, Win32Guid, Win32Version}; use std::convert::TryInto; use std::prelude::v1::*; use log::{info, warn}; -use memflow::mem::VirtualMemory; -use memflow::types::Address; +use memflow::architecture::ArchitectureObj; +use memflow::error::{Error, ErrorKind, ErrorOrigin, PartialResultExt, Result}; +use memflow::mem::{MemoryView, VirtualTranslate}; +use memflow::types::{umem, Address}; use pelite::{self, pe64::debug::CodeView, pe64::exports::Export, PeView}; -pub fn find( +pub fn find( virt_mem: &mut T, start_block: &StartBlock, -) -> Result<(Address, usize)> { - if start_block.arch.bits() == 64 { +) -> Result<(Address, umem)> { + let arch_obj = ArchitectureObj::from(start_block.arch); + if arch_obj.bits() == 64 { if !start_block.kernel_hint.is_null() { match 
x64::find_with_va_hint(virt_mem, start_block) { Ok(b) => return Ok(b), @@ -32,27 +34,28 @@ pub fn find( Ok(b) => return Ok(b), Err(e) => warn!("x64::find() error: {}", e), } - } else if start_block.arch.bits() == 32 { + } else if arch_obj.bits() == 32 { match x86::find(virt_mem, start_block) { Ok(b) => return Ok(b), Err(e) => warn!("x86::find() error: {}", e), } } - Err(Error::Initialization("unable to find ntoskrnl.exe")) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound) + .log_info("unable to find ntoskrnl.exe")) } // TODO: move to pe::... -pub fn find_guid(virt_mem: &mut T, kernel_base: Address) -> Result { - let image = pehelper::try_get_pe_image(virt_mem, kernel_base)?; - let pe = PeView::from_bytes(&image).map_err(Error::PE)?; +pub fn find_guid(mem: &mut T, kernel_base: Address) -> Result { + let image = pehelper::try_get_pe_image(mem, kernel_base)?; + let pe = PeView::from_bytes(&image) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_info(err))?; let debug = match pe.debug() { Ok(d) => d, Err(_) => { - return Err(Error::Initialization( - "unable to read debug_data in pe header", - )) + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_info("unable to read debug_data in pe header")) } }; @@ -61,68 +64,74 @@ pub fn find_guid(virt_mem: &mut T, kernel_base: Address) -> Re .map(|e| e.entry()) .filter_map(std::result::Result::ok) .find(|&e| e.as_code_view().is_some()) - .ok_or_else(|| Error::Initialization("unable to find codeview debug_data entry"))? + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_info("unable to find codeview debug_data entry") + })? .as_code_view() - .ok_or_else(|| Error::PE(pelite::Error::Unmapped))?; + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_info("unable to find codeview debug_data entry") + })?; let signature = match code_view { CodeView::Cv70 { image, .. } => image.Signature, CodeView::Cv20 { .. 
} => { - return Err(Error::Initialization( - "invalid code_view entry version 2 found, expected 7", - )) + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_info("invalid code_view entry version 2 found, expected 7")) } }; - let file_name = code_view.pdb_file_name().to_str()?; + let file_name = code_view.pdb_file_name().to_str().map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Encoding) + .log_info("unable to convert pdb file name to string") + })?; let guid = format!("{:X}{:X}", signature, code_view.age()); - Ok(Win32GUID::new(file_name, &guid)) + Ok(Win32Guid::new(file_name, &guid)) } -fn get_export(pe: &PeView, name: &str) -> Result { +fn get_export(pe: &PeView, name: &str) -> Result { info!("trying to find {} export", name); - let export = match pe.get_export_by_name(name).map_err(Error::PE)? { - Export::Symbol(s) => *s as usize, + let export = match pe + .get_export_by_name(name) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound).log_info(err))? 
+ { + Export::Symbol(s) => *s as umem, Export::Forward(_) => { - return Err(Error::Other("Export found but it was a forwarded export")) + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound) + .log_info("Export found but it was a forwarded export")) } }; info!("{} found at 0x{:x}", name, export); Ok(export) } -pub fn find_winver( - virt_mem: &mut T, - kernel_base: Address, -) -> Result { - let image = pehelper::try_get_pe_image(virt_mem, kernel_base)?; - let pe = PeView::from_bytes(&image).map_err(Error::PE)?; +pub fn find_winver(mem: &mut T, kernel_base: Address) -> Result { + let image = pehelper::try_get_pe_image(mem, kernel_base)?; + let pe = PeView::from_bytes(&image) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_info(err))?; // NtBuildNumber let nt_build_number_ref = get_export(&pe, "NtBuildNumber")?; let rtl_get_version_ref = get_export(&pe, "RtlGetVersion"); - let nt_build_number: u32 = virt_mem.virt_read(kernel_base + nt_build_number_ref)?; + let nt_build_number: u32 = mem.read(kernel_base + nt_build_number_ref)?; info!("nt_build_number: {}", nt_build_number); if nt_build_number == 0 { - return Err(Error::Initialization("unable to fetch nt build number")); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_info("unable to fetch nt build number")); } // TODO: these reads should be optional // try to find major/minor version // read from KUSER_SHARED_DATA. these fields exist since nt 4.0 so they have to exist in case NtBuildNumber exists. 
- let mut nt_major_version: u32 = virt_mem - .virt_read((0x7ffe0000 + 0x026C).into()) - .data_part()?; - let mut nt_minor_version: u32 = virt_mem - .virt_read((0x7ffe0000 + 0x0270).into()) - .data_part()?; + let mut nt_major_version: u32 = mem.read((0x7ffe0000 + 0x026C).into()).data_part()?; + let mut nt_minor_version: u32 = mem.read((0x7ffe0000 + 0x0270).into()).data_part()?; // fallback on x64: try to parse RtlGetVersion assembly if nt_major_version == 0 && rtl_get_version_ref.is_ok() { let mut buf = [0u8; 0x100]; - virt_mem - .virt_read_into(kernel_base + rtl_get_version_ref.unwrap(), &mut buf) + mem.read_into(kernel_base + rtl_get_version_ref.unwrap(), &mut buf) .data_part()?; nt_major_version = 0; diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/pehelper.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/pehelper.rs new file mode 100644 index 0000000..b82997e --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/pehelper.rs @@ -0,0 +1,64 @@ +use std::convert::TryInto; +use std::prelude::v1::*; + +use log::debug; + +use memflow::error::{Error, ErrorKind, ErrorOrigin, PartialResultExt, Result}; +use memflow::mem::MemoryView; +use memflow::types::{size, umem, Address}; + +use pelite::{self, PeView}; + +pub fn try_get_pe_size(mem: &mut T, probe_addr: Address) -> Result { + let mut probe_buf = vec![0; size::kb(4)]; + mem.read_raw_into(probe_addr, &mut probe_buf)?; + + let pe_probe = PeView::from_bytes(&probe_buf) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_trace(err))?; + + let opt_header = pe_probe.optional_header(); + let size_of_image = match opt_header { + pelite::Wrap::T32(opt32) => opt32.SizeOfImage, + pelite::Wrap::T64(opt64) => opt64.SizeOfImage, + }; + if size_of_image > 0 { + debug!( + "found pe header for image with a size of {} bytes.", + size_of_image + ); + Ok(size_of_image as umem) + } else { + Err(Error(ErrorOrigin::OsLayer, 
ErrorKind::InvalidExeFile) + .log_trace("pe size_of_image is zero")) + } +} + +pub fn try_get_pe_image(mem: &mut T, probe_addr: Address) -> Result> { + let size_of_image = try_get_pe_size(mem, probe_addr)?; + mem.read_raw(probe_addr, size_of_image.try_into().unwrap()) + .data_part() +} + +pub fn try_get_pe_name(mem: &mut T, probe_addr: Address) -> Result { + let image = try_get_pe_image(mem, probe_addr)?; + let pe = PeView::from_bytes(&image) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_trace(err))?; + let name = pe + .exports() + .map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_trace("unable to get exports") + })? + .dll_name() + .map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile) + .log_trace("unable to get dll name") + })? + .to_str() + .map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Encoding) + .log_trace("unable to convert dll name string") + })?; + debug!("try_get_pe_name: found pe header for {}", name); + Ok(name.to_string()) +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x64.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x64.rs new file mode 100644 index 0000000..ed12a6f --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x64.rs @@ -0,0 +1,108 @@ +use std::prelude::v1::*; + +use super::pehelper; +use crate::kernel::StartBlock; + +use log::{debug, trace}; + +use memflow::architecture::{x86::x64, ArchitectureObj}; +use memflow::cglue::tuple::*; +use memflow::dataview::PodMethods; +use memflow::error::{Error, ErrorKind, ErrorOrigin, PartialResultExt, Result}; +use memflow::iter::PageChunks; +use memflow::mem::{MemoryView, VirtualTranslate}; +use memflow::types::{mem, size, smem, umem, Address}; + +use pelite::image::IMAGE_DOS_HEADER; + +pub fn find_with_va_hint( + virt_mem: &mut T, + start_block: &StartBlock, +) -> Result<(Address, umem)> { + debug!( + "x64::find_with_va_hint: trying 
to find ntoskrnl.exe with va hint at {:x}", + start_block.kernel_hint.to_umem() + ); + + // va was found previously + let mut va_base = start_block.kernel_hint.to_umem() & !0x0001_ffff; + while va_base + mem::mb(16) > start_block.kernel_hint.to_umem() { + trace!("x64::find_with_va_hint: probing at {:x}", va_base); + + match find_with_va(virt_mem, va_base) { + Ok(a) => { + let addr = Address::from(a); + let size_of_image = pehelper::try_get_pe_size(virt_mem, addr)?; + return Ok((addr, size_of_image)); + } + Err(e) => trace!("x64::find_with_va_hint: probe error {:?}", e), + } + + va_base -= mem::mb(2); + } + + Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound) + .log_trace("x64::find_with_va_hint: unable to locate ntoskrnl.exe via va hint")) +} + +fn find_with_va(virt_mem: &mut T, va_base: umem) -> Result { + let mut buf = vec![0; size::mb(2)]; + virt_mem + .read_raw_into(Address::from(va_base), &mut buf) + .data_part()?; + + buf.chunks_exact(x64::ARCH.page_size()) + .enumerate() + .map(|(i, c)| { + let view = PodMethods::as_data_view(c); + (i, c, view.read::(0)) // TODO: potential endian mismatch + }) + .filter(|(_, _, p)| p.e_magic == 0x5a4d) // MZ + .filter(|(_, _, p)| p.e_lfanew <= 0x800) + .inspect(|(i, _, _)| { + trace!( + "x64::find_with_va: found potential header flags at offset {:x}", + *i as umem * x64::ARCH.page_size() as umem + ) + }) + .find(|(i, _, _)| { + let probe_addr = Address::from(va_base + (*i as umem) * x64::ARCH.page_size() as umem); + let name = pehelper::try_get_pe_name(virt_mem, probe_addr).unwrap_or_default(); + name == "ntoskrnl.exe" + }) + .map(|(i, _, _)| va_base + i as umem * x64::ARCH.page_size() as umem) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound) + .log_trace("unable to locate ntoskrnl.exe") + }) +} + +pub fn find( + virt_mem: &mut T, + start_block: &StartBlock, +) -> Result<(Address, umem)> { + debug!("x64::find: trying to find ntoskrnl.exe with page map",); + + let page_map = 
virt_mem.virt_page_map_range_vec( + smem::mb(2), + (!0u64 - (1u64 << (ArchitectureObj::from(start_block.arch).address_space_bits() - 1))) + .into(), + (!0u64).into(), + ); + + match page_map + .into_iter() + .flat_map(|CTup3(address, size, _)| size.page_chunks(address, size::mb(2))) + .filter(|(_, size)| *size > mem::kb(256)) + .filter_map(|(va, _)| find_with_va(virt_mem, va.to_umem()).ok()) + .next() + { + Some(a) => { + let addr = Address::from(a); + let size_of_image = pehelper::try_get_pe_size(virt_mem, addr)?; + Ok((addr, size_of_image)) + } + None => Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound) + .log_trace("x64::find: unable to locate ntoskrnl.exe with a page map")), + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x86.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x86.rs similarity index 67% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x86.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x86.rs index efec044..a840df1 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x86.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/ntos/x86.rs @@ -1,16 +1,15 @@ use std::prelude::v1::*; use super::pehelper; -use crate::error::{Error, Result}; use crate::kernel::StartBlock; -use log::{debug, info}; +use memflow::dataview::PodMethods; +use memflow::error::{Error, ErrorKind, ErrorOrigin, PartialResultExt, Result}; +use memflow::mem::MemoryView; +use memflow::types::{size, umem, Address}; -use memflow::error::PartialResultExt; -use memflow::mem::VirtualMemory; -use memflow::types::{size, Address}; +use log::{debug, info}; -use dataview::Pod; use pelite::image::IMAGE_DOS_HEADER; const SIZE_256MB: usize = size::mb(256); @@ -18,23 +17,20 @@ const SIZE_8MB: usize = size::mb(8); const SIZE_4KB: usize = size::kb(4); // https://github.com/ufrisk/MemProcFS/blob/f2d15cf4fe4f19cfeea3dad52971fae2e491064b/vmm/vmmwininit.c#L410 -pub fn find( - 
virt_mem: &mut T, - _start_block: &StartBlock, -) -> Result<(Address, usize)> { +pub fn find(virt_mem: &mut T, _start_block: &StartBlock) -> Result<(Address, umem)> { debug!("x86::find: trying to find ntoskrnl.exe"); - for base_addr in (0..SIZE_256MB as u64).step_by(SIZE_8MB) { - let base_addr = size::gb(2) as u64 + base_addr; + for base_addr in (0..SIZE_256MB).step_by(SIZE_8MB) { + let base_addr = size::gb(2) + base_addr; // search in each page in the first 8mb chunks in the first 64mb of virtual memory let mut buf = vec![0; SIZE_8MB]; virt_mem - .virt_read_raw_into(base_addr.into(), &mut buf) + .read_raw_into(base_addr.into(), &mut buf) .data_part()?; - for addr in (0..SIZE_8MB as u64).step_by(SIZE_4KB) { + for addr in (0..SIZE_8MB).step_by(SIZE_4KB) { // TODO: potential endian mismatch in pod - let view = Pod::as_data_view(&buf[addr as usize..]); + let view = PodMethods::as_data_view(&buf[addr..]); // check for dos header signature (MZ) // TODO: create global if view.read::(0).e_magic != 0x5a4d { @@ -58,7 +54,6 @@ pub fn find( } } - Err(Error::Initialization( - "find_x86(): unable to locate ntoskrnl.exe in high mem", - )) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound) + .log_trace("find_x86(): unable to locate ntoskrnl.exe in high mem")) } diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block.rs new file mode 100644 index 0000000..1f5ece4 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block.rs @@ -0,0 +1,94 @@ +mod aarch64; +mod x64; +mod x86; +mod x86pae; + +use std::prelude::v1::*; + +use log::warn; + +use memflow::architecture::ArchitectureIdent; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::mem::PhysicalMemory; +use memflow::types::{size, Address, PhysicalAddress}; + +// PROCESSOR_START_BLOCK +#[derive(Debug, Copy, Clone)] +#[cfg_attr(feature = "serde", 
derive(::serde::Serialize))] +pub struct StartBlock { + pub arch: ArchitectureIdent, + pub kernel_hint: Address, + pub dtb: Address, +} + +pub fn find_fallback( + mem: &mut T, + arch: ArchitectureIdent, +) -> Result { + match arch { + ArchitectureIdent::X86(64, _) => { + // read low 16mb stub + let mut low16m = vec![0; size::mb(16)]; + mem.phys_read_into(PhysicalAddress::NULL, low16m.as_mut_slice())?; + + x64::find(&low16m) + } + ArchitectureIdent::AArch64(_) => { + // read low 16mb stub + let mut low16m = vec![0; size::mb(16)]; + + //TODO: configure this, but so far arm null starts at this address + mem.phys_read_into(aarch64::PHYS_BASE.into(), low16m.as_mut_slice())?; + + aarch64::find(&low16m) + } + _ => Err(Error(ErrorOrigin::OsLayer, ErrorKind::NotImplemented) + .log_error("start_block: fallback not implemented for given arch")), + } +} + +// bcdedit /set firstmegabytepolicyuseall +pub fn find(mem: &mut T, arch: Option) -> Result { + if let Some(arch) = arch { + match arch { + ArchitectureIdent::X86(64, _) => { + // read low 1mb stub + let mut low1m = vec![0; size::mb(1)]; + mem.phys_read_into(PhysicalAddress::NULL, low1m.as_mut_slice())?; + + // find x64 dtb in low stub < 1M + match x64::find_lowstub(&low1m) { + Ok(d) => { + if d.dtb.to_umem() != 0 { + return Ok(d); + } + } + Err(e) => warn!("x64::find_lowstub() error: {}", e), + } + + find_fallback(mem, arch) + } + ArchitectureIdent::X86(32, true) => { + let mut low16m = vec![0; size::mb(16)]; + mem.phys_read_into(PhysicalAddress::NULL, low16m.as_mut_slice())?; + x86pae::find(&low16m) + } + ArchitectureIdent::X86(32, false) => { + let mut low16m = vec![0; size::mb(16)]; + mem.phys_read_into(PhysicalAddress::NULL, low16m.as_mut_slice())?; + x86::find(&low16m) + } + ArchitectureIdent::AArch64(_) => find_fallback(mem, arch), + _ => Err(Error(ErrorOrigin::OsLayer, ErrorKind::NotSupported) + .log_error("Unsupported architecture")), + } + } else { + find(mem, Some(ArchitectureIdent::X86(64, false))) + .or_else(|_| 
find(mem, Some(ArchitectureIdent::X86(32, true)))) + .or_else(|_| find(mem, Some(ArchitectureIdent::X86(32, false)))) + .or_else(|_| find(mem, Some(ArchitectureIdent::AArch64(size::kb(4))))) + .map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound).log_error("unable to find dtb") + }) + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/aarch64.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/aarch64.rs new file mode 100644 index 0000000..3e47908 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/aarch64.rs @@ -0,0 +1,62 @@ +use crate::kernel::StartBlock; + +use std::convert::TryInto; + +use memflow::architecture::arm::aarch64; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::types::{mem, umem, Address}; + +#[allow(clippy::unnecessary_cast)] +pub const PHYS_BASE: u64 = mem::gb(1) as u64; + +// mem here has to be a single page (4kb sized) +fn find_pt(addr: Address, mem: &[u8]) -> Option
{ + // TODO: global define / config setting + #[allow(clippy::unnecessary_cast)] + let max_mem = mem::gb(512) as u64; + + let pte = u64::from_le_bytes(mem[0..8].try_into().unwrap()); + + if (pte & 0x0000_0000_0000_0fff) != 0xf03 || (pte & 0x0000_ffff_ffff_f000) > max_mem { + return None; + } + + // Second half must have a self ref entry + // This is usually enough to filter wrong data out + #[allow(clippy::unnecessary_cast)] + mem[0x800..] + .chunks(8) + .map(|c| u64::from_le_bytes(c.try_into().unwrap())) + .find(|a| (a ^ 0xf03) & (!0u64 >> 12) == addr.to_umem() as u64)?; + + // A page table does need to have some entries, right? Particularly, kernel-side page table + // entries must exist + mem[0x800..] + .chunks(8) + .map(|c| u64::from_le_bytes(c.try_into().unwrap())) + .filter(|a| (a & 0xfff) == 0x703) + .nth(5)?; + + Some(addr) +} + +pub fn find(mem: &[u8]) -> Result { + mem.chunks_exact(aarch64::ARCH.page_size()) + .enumerate() + .filter_map(|(i, c)| { + find_pt( + Address::from(PHYS_BASE) + (i as umem * aarch64::ARCH.page_size() as umem), + c, + ) + }) + .map(|addr| StartBlock { + arch: aarch64::ARCH.ident(), + kernel_hint: Address::NULL, + dtb: addr, + }) + .next() + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_warn("unable to find aarch64 dtb in lowstub < 16M") + }) +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x64.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x64.rs similarity index 71% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x64.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x64.rs index 1df1e2a..ff2b8fd 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x64.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x64.rs @@ -1,15 +1,14 @@ -use crate::error::{Error, Result}; use crate::kernel::StartBlock; use std::convert::TryInto; use 
memflow::architecture::x86::x64; -use memflow::types::{size, Address}; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::types::{mem, umem, Address}; // https://github.com/ufrisk/MemProcFS/blob/f2d15cf4fe4f19cfeea3dad52971fae2e491064b/vmm/vmmwininit.c#L560 pub fn find_lowstub(stub: &[u8]) -> Result { - Ok(stub - .chunks_exact(x64::ARCH.page_size()) + stub.chunks_exact(x64::ARCH.page_size()) .skip(1) .filter(|c| { (0xffff_ffff_ffff_00ff & u64::from_le_bytes(c[0..8].try_into().unwrap())) @@ -23,16 +22,20 @@ pub fn find_lowstub(stub: &[u8]) -> Result { (0xffff_ff00_0000_0fff & u64::from_le_bytes(c[0xa0..0xa0 + 8].try_into().unwrap())) == 0 }) // pml4 .map(|c| StartBlock { - arch: x64::ARCH, + arch: x64::ARCH.ident(), kernel_hint: u64::from_le_bytes(c[0x70..0x70 + 8].try_into().unwrap()).into(), dtb: u64::from_le_bytes(c[0xa0..0xa0 + 8].try_into().unwrap()).into(), }) - .ok_or_else(|| Error::Initialization("unable to find x64 dtb in lowstub < 1M"))?) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_warn("unable to find x64 dtb in lowstub < 1M") + }) } fn find_pt(addr: Address, mem: &[u8]) -> Option
{ // TODO: global define / config setting - let max_mem = size::gb(512) as u64; + #[allow(clippy::unnecessary_cast)] + let max_mem = mem::gb(512) as u64; let pte = u64::from_le_bytes(mem[0..8].try_into().unwrap()); @@ -42,10 +45,11 @@ fn find_pt(addr: Address, mem: &[u8]) -> Option
{ // Second half must have a self ref entry // This is usually enough to filter wrong data out + #[allow(clippy::unnecessary_cast)] mem[0x800..] .chunks(8) .map(|c| u64::from_le_bytes(c.try_into().unwrap())) - .find(|a| (a ^ 0x0000_0000_0000_0063) & !(1u64 << 63) == addr.as_u64())?; + .find(|a| (a ^ 0x0000_0000_0000_0063) & !(1u64 << 63) == addr.to_umem() as u64)?; // A page table does need to have some entries, right? Particularly, kernel-side page table // entries must be marked as such @@ -61,12 +65,15 @@ fn find_pt(addr: Address, mem: &[u8]) -> Option
{ pub fn find(mem: &[u8]) -> Result { mem.chunks_exact(x64::ARCH.page_size()) .enumerate() - .filter_map(|(i, c)| find_pt((i * x64::ARCH.page_size()).into(), c)) + .filter_map(|(i, c)| find_pt((i as umem * x64::ARCH.page_size() as umem).into(), c)) .map(|addr| StartBlock { - arch: x64::ARCH, - kernel_hint: 0.into(), + arch: x64::ARCH.ident(), + kernel_hint: Address::NULL, dtb: addr, }) .next() - .ok_or_else(|| Error::Initialization("unable to find x64 dtb in lowstub < 16M")) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_warn("unable to find x64 dtb in lowstub < 16M") + }) } diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86.rs similarity index 60% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86.rs index 30f0c25..65e5dc7 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86.rs @@ -1,9 +1,9 @@ -use crate::error::{Error, Result}; use crate::kernel::StartBlock; use std::convert::TryInto; use memflow::architecture::x86::x32; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; use memflow::iter::PageChunks; use memflow::types::Address; @@ -13,7 +13,7 @@ fn check_page(base: Address, mem: &[u8]) -> bool { } let dword = u32::from_le_bytes(mem[0xc00..0xc00 + 4].try_into().unwrap()); - if (dword & 0xffff_f003) != (base.as_u32() + 0x3) { + if (dword & 0xffff_f003) != TryInto::::try_into(base.to_umem() + 0x3).unwrap() { return false; } @@ -26,12 +26,15 @@ fn check_page(base: Address, mem: &[u8]) -> bool { } pub fn find(mem: &[u8]) -> Result { - mem.page_chunks(Address::from(0), x32::ARCH.page_size()) + mem.page_chunks(Address::NULL, x32::ARCH.page_size()) .find(|(a, c)| check_page(*a, c)) .map(|(a, _)| StartBlock { - 
arch: x32::ARCH, - kernel_hint: 0.into(), + arch: x32::ARCH.ident(), + kernel_hint: Address::NULL, dtb: a, }) - .ok_or_else(|| Error::Initialization("unable to find x86 dtb in lowstub < 16M")) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_warn("unable to find x86 dtb in lowstub < 16M") + }) } diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86pae.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86pae.rs similarity index 55% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86pae.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86pae.rs index 0657768..5d1fc4a 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block/x86pae.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/start_block/x86pae.rs @@ -1,16 +1,17 @@ -use crate::error::{Error, Result}; use crate::kernel::StartBlock; use std::convert::TryInto; use memflow::architecture::x86::x32_pae; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; use memflow::iter::PageChunks; use memflow::types::Address; +#[allow(clippy::unnecessary_cast)] fn check_page(addr: Address, mem: &[u8]) -> bool { for (i, chunk) in mem.to_vec().chunks_exact(8).enumerate() { let qword = u64::from_le_bytes(chunk[0..8].try_into().unwrap()); - if (i < 4 && qword != addr.as_u64() + ((i as u64 * 8) << 9) + 0x1001) + if (i < 4 && qword != addr.to_umem() as u64 + ((i as u64 * 8) << 9) + 0x1001) || (i >= 4 && qword != 0) { return false; @@ -20,12 +21,15 @@ fn check_page(addr: Address, mem: &[u8]) -> bool { } pub fn find(mem: &[u8]) -> Result { - mem.page_chunks(Address::from(0), x32_pae::ARCH.page_size()) + mem.page_chunks(Address::NULL, x32_pae::ARCH.page_size()) .find(|(a, c)| check_page(*a, c)) .map(|(a, _)| StartBlock { - arch: x32_pae::ARCH, - kernel_hint: 0.into(), + arch: x32_pae::ARCH.ident(), + kernel_hint: Address::NULL, dtb: a, }) - .ok_or_else(|| 
Error::Initialization("unable to find x86_pae dtb in lowstub < 16M")) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_warn("unable to find x86_pae dtb in lowstub < 16M") + }) } diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/sysproc.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/sysproc.rs similarity index 58% rename from apex_dma/memflow_lib/memflow-win32/src/kernel/sysproc.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/sysproc.rs index ef997f0..879dd6d 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/sysproc.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/kernel/sysproc.rs @@ -2,18 +2,19 @@ use std::prelude::v1::*; use super::ntos::pehelper; use super::StartBlock; -use crate::error::{Error, Result}; use std::convert::TryInto; use log::{debug, info, warn}; -use memflow::mem::VirtualMemory; -use memflow::types::{size, Address}; +use memflow::architecture::ArchitectureObj; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::mem::MemoryView; +use memflow::types::{size, umem, Address}; use pelite::{self, pe64::exports::Export, PeView}; -pub fn find( +pub fn find( virt_mem: &mut T, start_block: &StartBlock, ntos: Address, @@ -30,54 +31,54 @@ pub fn find( Err(e) => warn!("{}", e), } - Err(Error::Initialization("unable to find system eprocess")) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::NotFound).log_info("unable to find system eprocess")) } // find from exported symbol -pub fn find_exported( +pub fn find_exported( virt_mem: &mut T, start_block: &StartBlock, kernel_base: Address, ) -> Result
{ // PsInitialSystemProcess -> PsActiveProcessHead let image = pehelper::try_get_pe_image(virt_mem, kernel_base)?; - let pe = PeView::from_bytes(&image).map_err(Error::PE)?; + let pe = PeView::from_bytes(&image) + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_info(err))?; let sys_proc = match pe .get_export_by_name("PsInitialSystemProcess") - .map_err(Error::PE)? + .map_err(|err| Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound).log_info(err))? { - Export::Symbol(s) => kernel_base + *s as usize, + Export::Symbol(s) => kernel_base + *s as umem, Export::Forward(_) => { - return Err(Error::Other( - "PsInitialSystemProcess found but it was a forwarded export", - )) + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound) + .log_info("PsInitialSystemProcess found but it was a forwarded export")) } }; info!("PsInitialSystemProcess found at 0x{:x}", sys_proc); + let arch_obj: ArchitectureObj = start_block.arch.into(); + // read containing value - let mut buf = vec![0u8; start_block.arch.size_addr()]; - let sys_proc_addr: Address = match start_block.arch.bits() { + let mut buf = vec![0u8; arch_obj.size_addr()]; + let sys_proc_addr: Address = match arch_obj.bits() { 64 => { - // TODO: replace by virt_read_into with ByteSwap - virt_mem.virt_read_raw_into(sys_proc, &mut buf)?; + virt_mem.read_raw_into(sys_proc, &mut buf)?; u64::from_le_bytes(buf[0..8].try_into().unwrap()).into() } 32 => { - // TODO: replace by virt_read_into with ByteSwap - virt_mem.virt_read_raw_into(sys_proc, &mut buf)?; + virt_mem.read_raw_into(sys_proc, &mut buf)?; u32::from_le_bytes(buf[0..4].try_into().unwrap()).into() } - _ => return Err(Error::InvalidArchitecture), + _ => return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidArchitecture)), }; Ok(sys_proc_addr) } -// scan in pdb +// TODO: scan in pdb // scan in section -pub fn find_in_section( +pub fn find_in_section( virt_mem: &mut T, _start_block: &StartBlock, ntos: Address, @@ -87,7 +88,7 @@ pub fn 
find_in_section( // ... check if its 32 or 64bit let mut header_buf = vec![0; size::mb(32)]; - virt_mem.virt_read_raw_into(ntos, &mut header_buf)?; + virt_mem.read_raw_into(ntos, &mut header_buf)?; /* let mut pe_opts = ParseOptions::default(); @@ -102,7 +103,6 @@ pub fn find_in_section( .ok_or_else(|| Error::new("unable to find section ALMOSTRO"))?; */ - Err(Error::Other( - "sysproc::find_in_section(): not implemented yet", - )) + Err(Error(ErrorOrigin::OsLayer, ErrorKind::NotImplemented) + .log_info("sysproc::find_in_section(): not implemented yet")) } diff --git a/apex_dma/memflow_lib/memflow-win32/src/lib.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/lib.rs similarity index 82% rename from apex_dma/memflow_lib/memflow-win32/src/lib.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/lib.rs index 389d797..0f5a754 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/lib.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/lib.rs @@ -6,8 +6,6 @@ It is used to interface with windows targets. 
#![cfg_attr(not(feature = "std"), no_std)] extern crate no_std_compat as std; -pub mod error; - pub mod kernel; pub mod offsets; @@ -16,7 +14,6 @@ pub mod win32; pub mod prelude { pub mod v1 { - pub use crate::error::*; pub use crate::kernel::*; pub use crate::offsets::*; pub use crate::win32::*; @@ -24,5 +21,5 @@ pub mod prelude { pub use v1::*; } -#[deprecated] -pub use prelude::v1::*; +#[cfg(feature = "plugins")] +pub mod plugins; diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/offsets/mod.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/offsets/mod.rs new file mode 100644 index 0000000..220ddf4 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/offsets/mod.rs @@ -0,0 +1,40 @@ +pub use memflow_win32_defs::offsets::*; + +use crate::prelude::v1::*; + +#[repr(align(16))] +struct Align16(pub T); + +#[cfg(feature = "embed_offsets")] +const WIN32_OFFSETS: Align16< + [u8; include_bytes!(concat!(env!("OUT_DIR"), "/win32_offsets.bin")).len()], +> = Align16(*include_bytes!(concat!( + env!("OUT_DIR"), + "/win32_offsets.bin" +))); + +pub fn offset_builder<'a>() -> Win32OffsetBuilder<'a> { + let builder = Win32Offsets::builder(); + + #[cfg(feature = "embed_offsets")] + { + // # Safety + // Struct padding and alignment is compile-time guaranteed by the struct (see mod offset_table). 
+ let offsets = unsafe { + core::slice::from_raw_parts( + WIN32_OFFSETS.0.as_ptr() as *const Win32OffsetFile, + WIN32_OFFSETS.0.len() / std::mem::size_of::(), + ) + }; + builder.offset_list(offsets) + } + #[cfg(not(feature = "embed_offsets"))] + builder +} + +pub fn offset_builder_with_kernel_info<'a>( + kernel_info: &Win32KernelInfo, +) -> Win32OffsetBuilder<'a> { + let builder = offset_builder(); + kernel_info.into_offset_builder(builder) +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/plugins.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/plugins.rs new file mode 100644 index 0000000..ee3e19a --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/plugins.rs @@ -0,0 +1,155 @@ +use crate::offsets::SymbolStore; +use crate::win32::{Win32Kernel, Win32KernelBuilder}; + +use memflow::cglue; +use memflow::plugins::{args, OsArgs}; +use memflow::prelude::v1::*; +use memflow::types::cache::TimedCacheValidator; + +use std::time::Duration; + +#[os(name = "win32", accept_input = true, return_wrapped = true)] +pub fn create_os( + args: &OsArgs, + mem: Option>, + lib: LibArc, +) -> Result> { + let mem = mem.ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration).log_error("Must provide memory!") + })?; + + let builder = Win32Kernel::builder(mem); + build_dtb(builder, &args.extra_args, lib) +} + +fn build_final< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + kernel_builder: Win32KernelBuilder, + _: &Args, + lib: LibArc, +) -> Result> { + log::info!( + "Building kernel of type {}", + std::any::type_name::>() + ); + let kernel = kernel_builder.build()?; + Ok(group_obj!((kernel, lib) as OsInstance)) +} + +fn build_arch< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + builder: Win32KernelBuilder, + args: &Args, + lib: LibArc, +) -> Result> { + match 
args.get("arch").map(|a| a.to_lowercase()).as_deref() { + Some("x64") => build_final(builder.arch(ArchitectureIdent::X86(64, false)), args, lib), + Some("x32") => build_final(builder.arch(ArchitectureIdent::X86(32, false)), args, lib), + Some("x32_pae") => build_final(builder.arch(ArchitectureIdent::X86(32, true)), args, lib), + Some("aarch64") => build_final( + builder.arch(ArchitectureIdent::AArch64(size::kb(4))), + args, + lib, + ), + _ => build_final(builder, args, lib), + } +} + +fn build_symstore< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + builder: Win32KernelBuilder, + args: &Args, + lib: LibArc, +) -> Result> { + match args.get("symstore") { + Some("uncached") => build_arch( + builder.symbol_store(SymbolStore::new().no_cache()), + args, + lib, + ), + Some("none") => build_arch(builder.no_symbol_store(), args, lib), + _ => build_arch(builder, args, lib), + } +} + +fn build_kernel_hint< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + builder: Win32KernelBuilder, + args: &Args, + lib: LibArc, +) -> Result> { + match args + .get("kernel_hint") + .and_then(|d| u64::from_str_radix(d, 16).ok()) + { + Some(dtb) => build_symstore(builder.kernel_hint(Address::from(dtb)), args, lib), + _ => build_symstore(builder, args, lib), + } +} + +fn build_vat< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + builder: Win32KernelBuilder, + args: &Args, + lib: LibArc, +) -> Result> { + match args::parse_vatcache(args)? 
{ + Some((0, _)) => build_kernel_hint( + builder.build_vat_cache(|v, a| { + CachedVirtualTranslate::builder(v).arch(a).build().unwrap() + }), + args, + lib, + ), + Some((size, time)) => build_kernel_hint( + builder.build_vat_cache(move |v, a| { + let builder = CachedVirtualTranslate::builder(v).arch(a).entries(size); + + if time > 0 { + builder + .validator(TimedCacheValidator::new(Duration::from_millis(time).into())) + .build() + .unwrap() + } else { + builder.build().unwrap() + } + }), + args, + lib, + ), + None => build_kernel_hint(builder, args, lib), + } +} + +fn build_dtb< + A: 'static + PhysicalMemory + Clone, + B: 'static + PhysicalMemory + Clone, + C: 'static + VirtualTranslate2 + Clone, +>( + builder: Win32KernelBuilder, + args: &Args, + lib: LibArc, +) -> Result> { + match args + .get("dtb") + .and_then(|d| u64::from_str_radix(d, 16).ok()) + { + Some(dtb) => build_vat(builder.dtb(Address::from(dtb)), args, lib), + _ => build_vat(builder, args, lib), + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32.rs similarity index 69% rename from apex_dma/memflow_lib/memflow-win32/src/win32.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32.rs index 51dc6bb..988fa4a 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/win32.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32.rs @@ -2,9 +2,9 @@ pub mod kernel; pub mod kernel_builder; pub mod kernel_info; -pub use kernel::Kernel; -pub use kernel_builder::KernelBuilder; -pub use kernel_info::KernelInfo; +pub use kernel::Win32Kernel; +pub use kernel_builder::Win32KernelBuilder; +pub use kernel_info::Win32KernelInfo; pub mod keyboard; pub mod module; diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel.rs new file mode 100644 index 0000000..9ee9e22 --- /dev/null +++ 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel.rs @@ -0,0 +1,714 @@ +mod mem_map; + +use crate::{ + offsets::{Win32ArchOffsets, Win32Offsets}, + prelude::{VirtualReadUnicodeString, Win32ExitStatus, EXIT_STATUS_STILL_ACTIVE}, +}; + +use super::{ + process::IMAGE_FILE_NAME_LENGTH, Win32KernelBuilder, Win32KernelInfo, Win32Keyboard, + Win32ModuleListInfo, Win32Process, Win32ProcessInfo, Win32VirtualTranslate, +}; + +use memflow::mem::virt_translate::*; +use memflow::prelude::v1::{Result, *}; + +#[cfg(feature = "plugins")] +use memflow::cglue; +#[cfg(feature = "plugins")] +use memflow::mem::{memory_view::*, phys_mem::*}; +#[cfg(feature = "plugins")] +use memflow::os::keyboard::*; + +use log::{info, trace}; +use std::convert::TryInto; +use std::fmt; +use std::prelude::v1::*; + +use pelite::{self, pe64::exports::Export, PeView}; + +const MAX_ITER_COUNT: usize = 65536; + +#[cfg(feature = "plugins")] +cglue_impl_group!(Win32Kernel, OsInstance<'a>, { PhysicalMemory, MemoryView, VirtualTranslate, OsKeyboard }); + +#[derive(Clone)] +pub struct Win32Kernel { + pub virt_mem: VirtualDma, + pub offsets: Win32Offsets, + + pub kernel_info: Win32KernelInfo, + pub sysproc_dtb: Address, + + pub kernel_modules: Option, +} + +impl + Win32Kernel +{ + pub fn new(phys_mem: T, vat: V, offsets: Win32Offsets, kernel_info: Win32KernelInfo) -> Self { + let mut virt_mem = VirtualDma::with_vat( + phys_mem, + kernel_info.os_info.arch, + Win32VirtualTranslate::new(kernel_info.os_info.arch, kernel_info.dtb), + vat, + ); + + if offsets.phys_mem_block() != 0 { + match kernel_info.os_info.arch.into_obj().bits() { + 32 => { + if let Some(mem_map) = mem_map::parse::<_, u32>( + &mut virt_mem, + kernel_info.os_info.base + offsets.phys_mem_block(), + ) { + // update mem mapping in connector + info!("updating connector mem_map={:?}", mem_map); + let (mut phys_mem, vat) = virt_mem.into_inner(); + phys_mem.set_mem_map(mem_map.into_vec().as_slice()); + virt_mem = VirtualDma::with_vat( + 
phys_mem, + kernel_info.os_info.arch, + Win32VirtualTranslate::new(kernel_info.os_info.arch, kernel_info.dtb), + vat, + ); + } + } + 64 => { + if let Some(mem_map) = mem_map::parse::<_, u64>( + &mut virt_mem, + kernel_info.os_info.base + offsets.phys_mem_block(), + ) { + // update mem mapping in connector + info!("updating connector mem_map={:?}", mem_map); + let (mut phys_mem, vat) = virt_mem.into_inner(); + phys_mem.set_mem_map(mem_map.into_vec().as_slice()); + virt_mem = VirtualDma::with_vat( + phys_mem, + kernel_info.os_info.arch, + Win32VirtualTranslate::new(kernel_info.os_info.arch, kernel_info.dtb), + vat, + ); + } + } + _ => {} + } + } + + // start_block only contains the winload's dtb which might + // be different to the one used in the actual kernel. + // In case of a failure this will fall back to the winload dtb. + // Read dtb of first process in eprocess list: + let sysproc_dtb = if let Some(Some(dtb)) = virt_mem + .read_addr_arch( + kernel_info.os_info.arch.into(), + kernel_info.eprocess_base + offsets.kproc_dtb(), + ) + .ok() + .map(|a| a.as_page_aligned(4096).non_null()) + { + info!("updating sysproc_dtb={:x}", dtb); + let (phys_mem, vat) = virt_mem.into_inner(); + virt_mem = VirtualDma::with_vat( + phys_mem, + kernel_info.os_info.arch, + Win32VirtualTranslate::new(kernel_info.os_info.arch, dtb), + vat, + ); + dtb + } else { + kernel_info.dtb + }; + + Self { + virt_mem, + offsets, + + kernel_info, + sysproc_dtb, + kernel_modules: None, + } + } + + pub fn kernel_modules(&mut self) -> Result { + if let Some(info) = self.kernel_modules { + Ok(info) + } else { + let image = self.virt_mem.read_raw( + self.kernel_info.os_info.base, + self.kernel_info.os_info.size.try_into().unwrap(), + )?; + let pe = PeView::from_bytes(&image).map_err(|err| { + Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile).log_info(err) + })?; + let addr = match pe.get_export_by_name("PsLoadedModuleList").map_err(|err| { + Error(ErrorOrigin::OsLayer, 
ErrorKind::ExportNotFound).log_info(err) + })? { + Export::Symbol(s) => self.kernel_info.os_info.base + *s as umem, + Export::Forward(_) => { + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound) + .log_info("PsLoadedModuleList found but it was a forwarded export")) + } + }; + + let addr = self + .virt_mem + .read_addr_arch(self.kernel_info.os_info.arch.into(), addr)?; + + let info = Win32ModuleListInfo::with_base(addr, self.kernel_info.os_info.arch)?; + + self.kernel_modules = Some(info); + Ok(info) + } + } + + /// Consumes this kernel and return the underlying owned memory and vat objects + pub fn into_inner(self) -> (T, V) { + self.virt_mem.into_inner() + } + + pub fn kernel_process_info(&mut self) -> Result { + let kernel_modules = self.kernel_modules()?; + + let vad_root = self.read_addr_arch( + self.kernel_info.os_info.arch.into(), + self.kernel_info.os_info.base + self.offsets.eproc_vad_root(), + )?; + + Ok(Win32ProcessInfo { + base_info: ProcessInfo { + address: self.kernel_info.os_info.base, + pid: 0, + state: ProcessState::Alive, + name: "ntoskrnl.exe".into(), + path: "".into(), + command_line: "".into(), + sys_arch: self.kernel_info.os_info.arch, + proc_arch: self.kernel_info.os_info.arch, + dtb1: self.sysproc_dtb, + dtb2: Address::invalid(), + }, + section_base: Address::NULL, // TODO: see below + ethread: Address::NULL, // TODO: see below + wow64: Address::NULL, + + teb: None, + teb_wow64: None, + + peb_native: None, + peb_wow64: None, + + module_info_native: Some(kernel_modules), + module_info_wow64: None, + + vad_root, + }) + } + + pub fn process_info_from_base_info( + &mut self, + base_info: ProcessInfo, + ) -> Result { + let section_base = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + base_info.address + self.offsets.eproc_section_base(), + )?; + trace!("section_base={:x}", section_base); + + // find first ethread + let ethread = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + 
base_info.address + self.offsets.eproc_thread_list(), + )? - self.offsets.ethread_list_entry(); + trace!("ethread={:x}", ethread); + + let peb_native = self + .virt_mem + .read_addr_arch( + self.kernel_info.os_info.arch.into(), + base_info.address + self.offsets.eproc_peb(), + )? + .non_null(); + + // TODO: Avoid doing this twice + let wow64 = if self.offsets.eproc_wow64() == 0 { + trace!("eproc_wow64=null; skipping wow64 detection"); + Address::null() + } else { + trace!( + "eproc_wow64={:x}; trying to read wow64 pointer", + self.offsets.eproc_wow64() + ); + self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + base_info.address + self.offsets.eproc_wow64(), + )? + }; + trace!("wow64={:x}", wow64); + + let mut peb_wow64 = None; + + // TODO: does this need to be read with the process ctx? + let (teb, teb_wow64) = if self.kernel_info.kernel_winver >= (6, 2).into() { + let teb = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + ethread + self.offsets.kthread_teb(), + )?; + + trace!("teb={:x}", teb); + + if !teb.is_null() { + ( + Some(teb), + if base_info.proc_arch == base_info.sys_arch { + None + } else { + Some(teb + 0x2000) + }, + ) + } else { + (None, None) + } + } else { + (None, None) + }; + + let vad_root = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + base_info.address + self.offsets.eproc_vad_root(), + )?; + + // construct reader with process dtb - win32 only uses/requires one dtb so we always store it in `dtb1` + // TODO: can tlb be used here already? 
+ let (phys_mem, vat) = self.virt_mem.mem_vat_pair(); + let mut proc_reader = VirtualDma::with_vat( + phys_mem.forward_mut(), + base_info.proc_arch, + Win32VirtualTranslate::new(self.kernel_info.os_info.arch, base_info.dtb1), + vat, + ); + + if let Some(teb) = teb_wow64 { + // from here on out we are in the process context + // we will be using the process type architecture now + peb_wow64 = proc_reader + .read_addr_arch( + self.kernel_info.os_info.arch.into(), + teb + self.offsets.teb_peb_x86(), + )? + .non_null(); + + trace!("peb_wow64={:?}", peb_wow64); + } + + trace!("peb_native={:?}", peb_native); + + let module_info_native = peb_native + .map(|peb| Win32ModuleListInfo::with_peb(&mut proc_reader, peb, base_info.sys_arch)) + .transpose()?; + + let module_info_wow64 = peb_wow64 + .map(|peb| Win32ModuleListInfo::with_peb(&mut proc_reader, peb, base_info.proc_arch)) + .transpose()?; + + Ok(Win32ProcessInfo { + base_info, + + section_base, + ethread, + wow64, + + teb, + teb_wow64, + + peb_native, + peb_wow64, + + module_info_native, + module_info_wow64, + + vad_root, + }) + } + + fn process_info_fill(&mut self, info: Win32ProcessInfo) -> Result { + // get full process name from module list + let cloned_base = info.base_info.clone(); + let mut name = info.base_info.name.clone(); + let callback = &mut |m: ModuleInfo| { + if m.name.as_ref().starts_with(name.as_ref()) { + name = m.name; + false + } else { + true + } + }; + let sys_arch = info.base_info.sys_arch; + let mut process = self.process_by_info(cloned_base)?; + process.module_list_callback(Some(&sys_arch), callback.into())?; + + // get process_parameters + let offsets = Win32ArchOffsets::from(info.base_info.proc_arch); + let (path, command_line) = if let Some(Ok(peb_process_params)) = info.peb().map(|peb| { + process.read_addr_arch( + info.base_info.proc_arch.into(), + peb + offsets.peb_process_params, + ) + }) { + trace!("peb_process_params={:x}", peb_process_params); + let image_path_name = process + 
.read_unicode_string( + info.base_info.proc_arch.into(), + peb_process_params + offsets.ppm_image_path_name, + ) + .unwrap_or_default(); + + let command_line = process + .read_unicode_string( + info.base_info.proc_arch.into(), + peb_process_params + offsets.ppm_command_line, + ) + .unwrap_or_default(); + + (image_path_name.into(), command_line.into()) + } else { + ("".into(), "".into()) + }; + + Ok(Win32ProcessInfo { + base_info: ProcessInfo { + name, + path, + command_line, + ..info.base_info + }, + ..info + }) + } + + fn process_info_base_by_address(&mut self, address: Address) -> Result { + let dtb = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + address + self.offsets.kproc_dtb(), + )?; + trace!("dtb={:x}", dtb); + + let pid: Pid = self.virt_mem.read(address + self.offsets.eproc_pid())?; + trace!("pid={}", pid); + + let state = if let Ok(exit_status) = self + .virt_mem + .read::(address + self.offsets.eproc_exit_status()) + { + if exit_status == EXIT_STATUS_STILL_ACTIVE { + ProcessState::Alive + } else { + ProcessState::Dead(exit_status) + } + } else { + ProcessState::Unknown + }; + + let name: ReprCString = self + .virt_mem + .read_char_array(address + self.offsets.eproc_name(), IMAGE_FILE_NAME_LENGTH)? + .into(); + trace!("name={}", name); + + let wow64 = if self.offsets.eproc_wow64() == 0 { + trace!("eproc_wow64=null; skipping wow64 detection"); + Address::null() + } else { + trace!( + "eproc_wow64={:x}; trying to read wow64 pointer", + self.offsets.eproc_wow64() + ); + self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + address + self.offsets.eproc_wow64(), + )? 
+ }; + trace!("wow64={:x}", wow64); + + // determine process architecture + let sys_arch = self.kernel_info.os_info.arch; + trace!("sys_arch={:?}", sys_arch); + let proc_arch = match ArchitectureObj::from(sys_arch).bits() { + 64 => { + if wow64.is_null() { + sys_arch + } else { + ArchitectureIdent::X86(32, true) + } + } + 32 => sys_arch, + _ => return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidArchitecture)), + }; + trace!("proc_arch={:?}", proc_arch); + + Ok(ProcessInfo { + address, + pid, + state, + name, + path: "".into(), + command_line: "".into(), + sys_arch, + proc_arch, + dtb1: dtb, + dtb2: Address::invalid(), + }) + } +} + +impl Win32Kernel { + pub fn builder(connector: T) -> Win32KernelBuilder { + Win32KernelBuilder::::new(connector) + } +} + +impl AsMut for Win32Kernel { + fn as_mut(&mut self) -> &mut T { + self.virt_mem.phys_mem() + } +} + +impl AsMut> + for Win32Kernel +{ + fn as_mut(&mut self) -> &mut VirtualDma { + &mut self.virt_mem + } +} + +impl PhysicalMemory for Win32Kernel { + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { + self.virt_mem.phys_mem().phys_read_raw_iter(data) + } + + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()> { + self.virt_mem.phys_mem().phys_write_raw_iter(data) + } + + fn metadata(&self) -> PhysicalMemoryMetadata { + self.virt_mem.phys_mem_ref().metadata() + } + + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.virt_mem.phys_mem().set_mem_map(mem_map) + } +} + +impl MemoryView for Win32Kernel { + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()> { + self.virt_mem.read_raw_iter(data) + } + + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()> { + self.virt_mem.write_raw_iter(data) + } + + fn metadata(&self) -> MemoryViewMetadata { + self.virt_mem.metadata() + } +} + +impl VirtualTranslate for Win32Kernel { + fn virt_to_phys_list( + &mut self, + addrs: &[VtopRange], + out: VirtualTranslationCallback, + out_fail: 
VirtualTranslationFailCallback, + ) { + self.virt_mem.virt_to_phys_list(addrs, out, out_fail) + } +} + +impl Os + for Win32Kernel +{ + type ProcessType<'a> = Win32Process, Fwd<&'a mut V>, Win32VirtualTranslate>; + type IntoProcessType = Win32Process; + + /// Walks a process list and calls a callback for each process structure address + /// + /// The callback is fully opaque. We need this style so that C FFI can work seamlessly. + fn process_address_list_callback( + &mut self, + mut callback: AddressCallback, + ) -> memflow::error::Result<()> { + let list_start = self.kernel_info.eprocess_base + self.offsets.eproc_link(); + let mut list_entry = list_start; + + for _ in 0..MAX_ITER_COUNT { + let eprocess = list_entry - self.offsets.eproc_link(); + trace!("eprocess={}", eprocess); + + // test flink + blink before adding the process + let flink_entry = self + .virt_mem + .read_addr_arch(self.kernel_info.os_info.arch.into(), list_entry)?; + trace!("flink_entry={}", flink_entry); + let blink_entry = self.virt_mem.read_addr_arch( + self.kernel_info.os_info.arch.into(), + list_entry + self.offsets.list_blink(), + )?; + trace!("blink_entry={}", blink_entry); + + if flink_entry.is_null() + || blink_entry.is_null() + || flink_entry == list_start + || flink_entry == list_entry + { + break; + } + + trace!("found eprocess {:x}", eprocess); + if !callback.call(eprocess) { + break; + } + trace!("Continuing {:x} -> {:x}", list_entry, flink_entry); + + // continue + list_entry = flink_entry; + } + + Ok(()) + } + + /// Find process information by its internal address + fn process_info_by_address(&mut self, address: Address) -> memflow::error::Result { + let base_info = self.process_info_base_by_address(address)?; + if let Ok(info) = self.process_info_from_base_info(base_info.clone()) { + Ok(self.process_info_fill(info)?.base_info) + } else { + Ok(base_info) + } + } + + /// Creates a process by its internal address + /// + /// It will share the underlying memory resources + fn 
process_by_info( + &mut self, + info: ProcessInfo, + ) -> memflow::error::Result> { + let proc_info = self.process_info_from_base_info(info)?; + Ok(Win32Process::with_kernel_ref(self, proc_info)) + } + + /// Creates a process by its internal address + /// + /// It will consume the kernel and not affect memory usage + /// + /// If no process with the specified address can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + fn into_process_by_info( + mut self, + info: ProcessInfo, + ) -> memflow::error::Result { + let proc_info = self.process_info_from_base_info(info)?; + Ok(Win32Process::with_kernel(self, proc_info)) + } + + /// Walks the kernel module list and calls the provided callback for each module structure + /// address + /// + /// # Arguments + /// * `callback` - where to pass each matching module to. This is an opaque callback. + fn module_address_list_callback( + &mut self, + callback: AddressCallback, + ) -> memflow::error::Result<()> { + self.kernel_modules()? + .module_entry_list_callback::>( + self, + self.kernel_info.os_info.arch, + callback, + ) + .map_err(From::from) + } + + /// Retrieves a module by its structure address + /// + /// # Arguments + /// * `address` - address where module's information resides in + fn module_by_address(&mut self, address: Address) -> memflow::error::Result { + self.kernel_modules()? + .module_info_from_entry( + address, + self.kernel_info.eprocess_base, + &mut self.virt_mem, + self.kernel_info.os_info.arch, + ) + .map_err(From::from) + } + + /// Retrieves address of the primary module structure of the process + /// + /// This will generally be for the initial executable that was run + fn primary_module_address(&mut self) -> Result
{ + Ok(self.module_by_name("ntoskrnl.exe")?.address) + } + + /// Retrieves information for the primary module of the process + /// + /// This will generally be the initial executable that was run + fn primary_module(&mut self) -> Result { + self.module_by_name("ntoskrnl.exe") + } + + /// Retrieves a list of all imports of a given module + fn module_import_list_callback( + &mut self, + info: &ModuleInfo, + callback: ImportCallback, + ) -> Result<()> { + memflow::os::util::module_import_list_callback(&mut self.virt_mem, info, callback) + } + + /// Retrieves a list of all exports of a given module + fn module_export_list_callback( + &mut self, + info: &ModuleInfo, + callback: ExportCallback, + ) -> Result<()> { + memflow::os::util::module_export_list_callback(&mut self.virt_mem, info, callback) + } + + /// Retrieves a list of all sections of a given module + fn module_section_list_callback( + &mut self, + info: &ModuleInfo, + callback: SectionCallback, + ) -> Result<()> { + memflow::os::util::module_section_list_callback(&mut self.virt_mem, info, callback) + } + + /// Retrieves the kernel info + fn info(&self) -> &OsInfo { + &self.kernel_info.os_info + } +} + +impl OsKeyboard + for Win32Kernel +{ + type KeyboardType<'a> = + Win32Keyboard, Fwd<&'a mut V>, Win32VirtualTranslate>>; + type IntoKeyboardType = Win32Keyboard>; + + fn keyboard(&mut self) -> memflow::error::Result> { + Win32Keyboard::with_kernel_ref(self) + } + + fn into_keyboard(self) -> memflow::error::Result { + Win32Keyboard::with_kernel(self) + } +} + +impl fmt::Debug for Win32Kernel { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.kernel_info) + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel/mem_map.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel/mem_map.rs new file mode 100644 index 0000000..a74dd75 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel/mem_map.rs @@ -0,0 +1,66 
@@ +use std::prelude::v1::*; + +use log::{info, trace}; +use std::fmt; + +use memflow::mem::{MemoryMap, MemoryView}; +use memflow::types::{mem, umem, Address}; + +use memflow::dataview::Pod; + +#[allow(clippy::unnecessary_cast)] +const SIZE_4KB: u64 = mem::kb(4) as u64; + +/// The number of PhysicalMemoryRuns contained in the Header +pub const PHYSICAL_MEMORY_MAX_RUNS: usize = 32; + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct PhysicalMemoryRun { + pub base_page: T, + pub page_count: T, +} +unsafe impl Pod for PhysicalMemoryRun {} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct PhysicalMemoryDescriptor { + pub number_of_runs: T, + pub number_of_pages: T, + pub runs: [PhysicalMemoryRun; PHYSICAL_MEMORY_MAX_RUNS], +} +unsafe impl Pod for PhysicalMemoryDescriptor {} +const _: [(); std::mem::size_of::>()] = [(); 0x108]; +const _: [(); std::mem::size_of::>()] = [(); 0x210]; + +pub fn parse>( + virt_mem: &mut T, + descriptor_ptr_ptr: Address, +) -> Option> { + let descriptor_ptr = virt_mem.read_addr64(descriptor_ptr_ptr).ok()?; + + trace!("found phys_mem_block pointer at: {}", descriptor_ptr); + let descriptor: PhysicalMemoryDescriptor = virt_mem.read(descriptor_ptr).ok()?; + + trace!("found phys_mem_block: {:?}", descriptor); + if descriptor.number_of_runs.into() <= PHYSICAL_MEMORY_MAX_RUNS as u64 { + let mut mem_map = MemoryMap::new(); + + for i in 0..descriptor.number_of_runs.into() { + let base = descriptor.runs[i as usize].base_page.into() * SIZE_4KB; + let size = descriptor.runs[i as usize].page_count.into() * SIZE_4KB; + + trace!("adding memory mapping: base={:x} size={:x}", base, size); + mem_map.push_remap(base.into(), size as umem, Address::from(base)); + } + + Some(mem_map) + } else { + info!( + "too many memory segments in phys_mem_block: {} found, {} expected", + descriptor.number_of_runs.into(), + PHYSICAL_MEMORY_MAX_RUNS + ); + None + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel_builder.rs 
b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_builder.rs similarity index 65% rename from apex_dma/memflow_lib/memflow-win32/src/win32/kernel_builder.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_builder.rs index 5ffd99b..60cb0e4 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel_builder.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_builder.rs @@ -1,18 +1,21 @@ use std::prelude::v1::*; -use super::{Kernel, KernelInfo}; -use crate::error::Result; +use super::{Win32Kernel, Win32KernelInfo}; use crate::offsets::Win32Offsets; #[cfg(feature = "symstore")] use crate::offsets::SymbolStore; -use memflow::architecture::ArchitectureObj; +use crate::offsets::offset_builder_with_kernel_info; + +use memflow::architecture::ArchitectureIdent; +use memflow::cglue::forward::ForwardMut; +use memflow::error::Result; use memflow::mem::{ - CachedMemoryAccess, CachedVirtualTranslate, DefaultCacheValidator, DirectTranslate, - PhysicalMemory, VirtualTranslate, + phys_mem::CachedPhysicalMemory, virt_translate::CachedVirtualTranslate, DirectTranslate, + PhysicalMemory, VirtualTranslate2, }; -use memflow::types::Address; +use memflow::types::{Address, DefaultCacheValidator}; /// Builder for a Windows Kernel structure. /// @@ -20,10 +23,10 @@ use memflow::types::Address; /// and will make sure the user gets a properly initialized object at the end. /// /// This function is a high level abstraction over the individual parts of initialization a Windows target: -/// - Scanning for the ntoskrnl and retrieving the `KernelInfo` struct. +/// - Scanning for the ntoskrnl and retrieving the `Win32KernelInfo` struct. /// - Retrieving the Offsets for the target Windows version. -/// - Creating a struct which implements `VirtualTranslate` for virtual to physical address translations. -/// - Optionally wrapping the Connector or the `VirtualTranslate` object into a cached object. 
+/// - Creating a struct which implements `VirtualTranslate2` for virtual to physical address translations. +/// - Optionally wrapping the Connector or the `VirtualTranslate2` object into a cached object. /// - Initialization of the Kernel structure itself. /// /// # Examples @@ -31,10 +34,10 @@ use memflow::types::Address; /// Using the builder with default values: /// ``` /// use memflow::mem::PhysicalMemory; -/// use memflow_win32::win32::Kernel; +/// use memflow_win32::win32::Win32Kernel; /// -/// fn test(connector: T) { -/// let _kernel = Kernel::builder(connector) +/// fn test(connector: T) { +/// let _kernel = Win32Kernel::builder(connector) /// .build() /// .unwrap(); /// } @@ -43,10 +46,10 @@ use memflow::types::Address; /// Using the builder with default cache configurations: /// ``` /// use memflow::mem::PhysicalMemory; -/// use memflow_win32::win32::Kernel; +/// use memflow_win32::win32::Win32Kernel; /// -/// fn test(connector: T) { -/// let _kernel = Kernel::builder(connector) +/// fn test(connector: T) { +/// let _kernel = Win32Kernel::builder(connector) /// .build_default_caches() /// .build() /// .unwrap(); @@ -55,13 +58,13 @@ use memflow::types::Address; /// /// Customizing the caches: /// ``` -/// use memflow::mem::{PhysicalMemory, CachedMemoryAccess, CachedVirtualTranslate}; -/// use memflow_win32::win32::Kernel; +/// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory, CachedVirtualTranslate}; +/// use memflow_win32::win32::Win32Kernel; /// -/// fn test(connector: T) { -/// let _kernel = Kernel::builder(connector) +/// fn test(connector: T) { +/// let _kernel = Win32Kernel::builder(connector) /// .build_page_cache(|connector, arch| { -/// CachedMemoryAccess::builder(connector) +/// CachedPhysicalMemory::builder(connector) /// .arch(arch) /// .build() /// .unwrap() @@ -82,53 +85,58 @@ use memflow::types::Address; /// Manual initialization of the above examples would look like the following: /// ``` /// use memflow::prelude::v1::*; -/// use 
memflow_win32::prelude::{KernelInfo, Win32Offsets, Kernel}; +/// use memflow_win32::prelude::{ +/// Win32KernelInfo, +/// Win32Offsets, +/// Win32Kernel, +/// offset_builder_with_kernel_info +/// }; /// -/// fn test(mut connector: T) { +/// fn test(mut connector: T) { /// // Use the ntoskrnl scanner to find the relevant KernelInfo (start_block, arch, dtb, ntoskrnl, etc) -/// let kernel_info = KernelInfo::scanner(&mut connector).scan().unwrap(); +/// let kernel_info = Win32KernelInfo::scanner(connector.forward_mut()).scan().unwrap(); /// // Download the corresponding pdb from the default symbol store -/// let offsets = Win32Offsets::builder().kernel_info(&kernel_info).build().unwrap(); +/// let offsets = offset_builder_with_kernel_info(&kernel_info).build().unwrap(); /// /// // Create a struct for doing virtual to physical memory translations /// let vat = DirectTranslate::new(); /// /// // Create a Page Cache layer with default values -/// let mut connector_cached = CachedMemoryAccess::builder(connector) -/// .arch(kernel_info.start_block.arch) +/// let mut connector_cached = CachedPhysicalMemory::builder(connector) +/// .arch(kernel_info.os_info.arch) /// .build() /// .unwrap(); /// -/// // Create a TLB Cache layer with default values +/// // Create a Tlb Cache layer with default values /// let vat_cached = CachedVirtualTranslate::builder(vat) -/// .arch(kernel_info.start_block.arch) +/// .arch(kernel_info.os_info.arch) /// .build() /// .unwrap(); /// /// // Initialize the final Kernel object -/// let _kernel = Kernel::new(&mut connector_cached, vat_cached, offsets, kernel_info); +/// let _kernel = Win32Kernel::new(connector_cached, vat_cached, offsets, kernel_info); /// } /// ``` -pub struct KernelBuilder { +pub struct Win32KernelBuilder { connector: T, - arch: Option, + arch: Option, kernel_hint: Option
, dtb: Option
, #[cfg(feature = "symstore")] symbol_store: Option, - build_page_cache: Box TK>, - build_vat_cache: Box VK>, + build_page_cache: Box TK>, + build_vat_cache: Box VK>, } -impl KernelBuilder +impl Win32KernelBuilder where T: PhysicalMemory, { - pub fn new(connector: T) -> KernelBuilder { - KernelBuilder { + pub fn new(connector: T) -> Win32KernelBuilder { + Win32KernelBuilder { connector, arch: None, @@ -144,15 +152,15 @@ where } } -impl<'a, T, TK, VK> KernelBuilder +impl<'a, T, TK, VK> Win32KernelBuilder where T: PhysicalMemory, - TK: PhysicalMemory, - VK: VirtualTranslate, + TK: 'static + PhysicalMemory + Clone, + VK: 'static + VirtualTranslate2 + Clone, { - pub fn build(mut self) -> Result> { + pub fn build(mut self) -> Result> { // find kernel_info - let mut kernel_scanner = KernelInfo::scanner(&mut self.connector); + let mut kernel_scanner = Win32KernelInfo::scanner(self.connector.forward_mut()); if let Some(arch) = self.arch { kernel_scanner = kernel_scanner.arch(arch); } @@ -167,16 +175,17 @@ where // acquire offsets from the symbol store let offsets = self.build_offsets(&kernel_info)?; + // TODO: parse memory maps + // create a vat object let vat = DirectTranslate::new(); // create caches - let kernel_connector = - (self.build_page_cache)(self.connector, kernel_info.start_block.arch); - let kernel_vat = (self.build_vat_cache)(vat, kernel_info.start_block.arch); + let kernel_connector = (self.build_page_cache)(self.connector, kernel_info.os_info.arch); + let kernel_vat = (self.build_vat_cache)(vat, kernel_info.os_info.arch); // create the final kernel object - Ok(Kernel::new( + Ok(Win32Kernel::new( kernel_connector, kernel_vat, offsets, @@ -185,22 +194,22 @@ where } #[cfg(feature = "symstore")] - fn build_offsets(&self, kernel_info: &KernelInfo) -> Result { - let mut builder = Win32Offsets::builder(); + fn build_offsets(&self, kernel_info: &Win32KernelInfo) -> Result { + let mut builder = offset_builder_with_kernel_info(kernel_info); if let Some(store) = 
&self.symbol_store { builder = builder.symbol_store(store.clone()); } else { builder = builder.no_symbol_store(); } - builder.kernel_info(kernel_info).build() + builder.build() } #[cfg(not(feature = "symstore"))] - fn build_offsets(&self, kernel_info: &KernelInfo) -> Result { - Win32Offsets::builder().kernel_info(&kernel_info).build() + fn build_offsets(&self, kernel_info: &Win32KernelInfo) -> Result { + offset_builder_with_kernel_info(&kernel_info).build() } - pub fn arch(mut self, arch: ArchitectureObj) -> Self { + pub fn arch(mut self, arch: ArchitectureIdent) -> Self { self.arch = Some(arch); self } @@ -222,10 +231,10 @@ where /// /// ``` /// use memflow::mem::PhysicalMemory; - /// use memflow_win32::prelude::{Kernel, SymbolStore}; + /// use memflow_win32::prelude::{Win32Kernel, SymbolStore}; /// - /// fn test(connector: T) { - /// let _kernel = Kernel::builder(connector) + /// fn test(connector: T) { + /// let _kernel = Win32Kernel::builder(connector) /// .symbol_store(SymbolStore::new().no_cache()) /// .build() /// .unwrap(); @@ -246,11 +255,11 @@ where /// /// ``` /// use memflow::mem::PhysicalMemory; - /// use memflow_win32::win32::Kernel; + /// use memflow_win32::win32::Win32Kernel; /// use memflow_win32::offsets::SymbolStore; /// - /// fn test(connector: T) { - /// let _kernel = Kernel::builder(connector) + /// fn test(connector: T) { + /// let _kernel = Win32Kernel::builder(connector) /// .no_symbol_store() /// .build() /// .unwrap(); @@ -272,10 +281,10 @@ where /// /// ``` /// use memflow::mem::PhysicalMemory; - /// use memflow_win32::win32::Kernel; + /// use memflow_win32::win32::Win32Kernel; /// - /// fn test(connector: T) { - /// let _kernel = Kernel::builder(connector) + /// fn test(connector: T) { + /// let _kernel = Win32Kernel::builder(connector) /// .build_default_caches() /// .build() /// .unwrap(); @@ -283,12 +292,12 @@ where /// ``` pub fn build_default_caches( self, - ) -> KernelBuilder< + ) -> Win32KernelBuilder< T, - CachedMemoryAccess<'a, 
T, DefaultCacheValidator>, + CachedPhysicalMemory<'a, T, DefaultCacheValidator>, CachedVirtualTranslate, > { - KernelBuilder { + Win32KernelBuilder { connector: self.connector, arch: self.arch, @@ -299,7 +308,7 @@ where symbol_store: self.symbol_store, build_page_cache: Box::new(|connector, arch| { - CachedMemoryAccess::builder(connector) + CachedPhysicalMemory::builder(connector) .arch(arch) .build() .unwrap() @@ -321,13 +330,13 @@ where /// # Examples /// /// ``` - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; - /// use memflow_win32::win32::Kernel; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; + /// use memflow_win32::win32::Win32Kernel; /// - /// fn test(connector: T) { - /// let _kernel = Kernel::builder(connector) + /// fn test(connector: T) { + /// let _kernel = Win32Kernel::builder(connector) /// .build_page_cache(|connector, arch| { - /// CachedMemoryAccess::builder(connector) + /// CachedPhysicalMemory::builder(connector) /// .arch(arch) /// .build() /// .unwrap() @@ -336,14 +345,14 @@ where /// .unwrap(); /// } /// ``` - pub fn build_page_cache TKN + 'static>( + pub fn build_page_cache TKN + 'static>( self, func: F, - ) -> KernelBuilder + ) -> Win32KernelBuilder where TKN: PhysicalMemory, { - KernelBuilder { + Win32KernelBuilder { connector: self.connector, arch: self.arch, @@ -367,10 +376,10 @@ where /// /// ``` /// use memflow::mem::{PhysicalMemory, CachedVirtualTranslate}; - /// use memflow_win32::win32::Kernel; + /// use memflow_win32::win32::Win32Kernel; /// - /// fn test(connector: T) { - /// let _kernel = Kernel::builder(connector) + /// fn test(connector: T) { + /// let _kernel = Win32Kernel::builder(connector) /// .build_vat_cache(|vat, arch| { /// CachedVirtualTranslate::builder(vat) /// .arch(arch) @@ -381,14 +390,14 @@ where /// .unwrap(); /// } /// ``` - pub fn build_vat_cache VKN + 'static>( + pub fn build_vat_cache VKN + 'static>( self, func: F, - ) -> KernelBuilder + ) -> Win32KernelBuilder where - VKN: 
VirtualTranslate, + VKN: VirtualTranslate2, { - KernelBuilder { + Win32KernelBuilder { connector: self.connector, arch: self.arch, diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel_info.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_info.rs similarity index 65% rename from apex_dma/memflow_lib/memflow-win32/src/win32/kernel_info.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_info.rs index c63559c..2613588 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel_info.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/kernel_info.rs @@ -1,38 +1,59 @@ -use crate::error::Result; use crate::kernel::{self, StartBlock}; -use crate::kernel::{Win32GUID, Win32Version}; +use crate::kernel::{Win32Guid, Win32Version}; use log::{info, warn}; -use memflow::architecture::ArchitectureObj; -use memflow::mem::{DirectTranslate, PhysicalMemory, VirtualDMA}; +use memflow::architecture::ArchitectureIdent; +use memflow::cglue::forward::ForwardMut; +use memflow::error::Result; +use memflow::mem::{DirectTranslate, PhysicalMemory, VirtualDma}; +use memflow::os::OsInfo; use memflow::types::Address; use super::Win32VirtualTranslate; +use crate::offsets::Win32OffsetBuilder; + #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct KernelInfo { - pub start_block: StartBlock, - - pub kernel_base: Address, - pub kernel_size: usize, +pub struct Win32KernelInfo { + pub os_info: OsInfo, + pub dtb: Address, - pub kernel_guid: Option, + pub kernel_guid: Option, pub kernel_winver: Win32Version, pub eprocess_base: Address, } -impl KernelInfo { +impl Win32KernelInfo { pub fn scanner(mem: T) -> KernelInfoScanner { KernelInfoScanner::new(mem) } + + pub fn into_offset_builder<'a>( + &self, + mut offsets: Win32OffsetBuilder<'a>, + ) -> Win32OffsetBuilder<'a> { + if offsets.get_guid().is_none() && self.kernel_guid.is_some() { + offsets = 
offsets.guid(self.kernel_guid.clone().unwrap()); + } + + if offsets.get_winver().is_none() { + offsets = offsets.winver(self.kernel_winver); + } + + if offsets.get_arch().is_none() { + offsets = offsets.arch(self.os_info.arch.into()); + } + + offsets + } } pub struct KernelInfoScanner { mem: T, - arch: Option, + arch: Option, kernel_hint: Option
, dtb: Option
, } @@ -47,7 +68,7 @@ impl KernelInfoScanner { } } - pub fn scan(mut self) -> Result { + pub fn scan(mut self) -> Result { let start_block = if let (Some(arch), Some(dtb), Some(kernel_hint)) = (self.arch, self.dtb, self.kernel_hint) { @@ -72,40 +93,40 @@ impl KernelInfoScanner { }) } - fn scan_block(&mut self, start_block: StartBlock) -> Result { + fn scan_block(&mut self, start_block: StartBlock) -> Result { info!( "arch={:?} kernel_hint={:x} dtb={:x}", start_block.arch, start_block.kernel_hint, start_block.dtb ); // construct virtual memory object for start_block - let mut virt_mem = VirtualDMA::with_vat( - &mut self.mem, + let mut virt_mem = VirtualDma::with_vat( + self.mem.forward_mut(), start_block.arch, Win32VirtualTranslate::new(start_block.arch, start_block.dtb), DirectTranslate::new(), ); // find ntoskrnl.exe base - let (kernel_base, kernel_size) = kernel::ntos::find(&mut virt_mem, &start_block)?; - info!("kernel_base={} kernel_size={}", kernel_base, kernel_size); + let (base, size) = kernel::ntos::find(&mut virt_mem, &start_block)?; + info!("base={} size={}", base, size); // get ntoskrnl.exe guid - let kernel_guid = kernel::ntos::find_guid(&mut virt_mem, kernel_base).ok(); + let kernel_guid = kernel::ntos::find_guid(&mut virt_mem, base).ok(); info!("kernel_guid={:?}", kernel_guid); - let kernel_winver = kernel::ntos::find_winver(&mut virt_mem, kernel_base).ok(); + let kernel_winver = kernel::ntos::find_winver(&mut virt_mem, base).ok(); if kernel_winver.is_none() { warn!("Failed to retrieve kernel version! 
Some features may be disabled."); } - let kernel_winver = kernel_winver.unwrap_or_default(); + let kernel_winver = kernel_winver.unwrap_or_else(|| Win32Version::new(3, 10, 511)); info!("kernel_winver={:?}", kernel_winver); // find eprocess base - let eprocess_base = kernel::sysproc::find(&mut virt_mem, &start_block, kernel_base)?; + let eprocess_base = kernel::sysproc::find(&mut virt_mem, &start_block, base)?; info!("eprocess_base={:x}", eprocess_base); // start_block only contains the winload's dtb which might @@ -113,11 +134,15 @@ impl KernelInfoScanner { // see Kernel::new() for more information. info!("start_block.dtb={:x}", start_block.dtb); - Ok(KernelInfo { - start_block, + let StartBlock { + arch, + kernel_hint: _, + dtb, + } = start_block; - kernel_base, - kernel_size, + Ok(Win32KernelInfo { + os_info: OsInfo { base, size, arch }, + dtb, kernel_guid, kernel_winver, @@ -126,7 +151,7 @@ impl KernelInfoScanner { }) } - pub fn arch(mut self, arch: ArchitectureObj) -> Self { + pub fn arch(mut self, arch: ArchitectureIdent) -> Self { self.arch = Some(arch); self } diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/keyboard.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/keyboard.rs new file mode 100644 index 0000000..ff4cf9e --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/keyboard.rs @@ -0,0 +1,339 @@ +/*! +Module for reading a target's keyboard state. + +The `gafAsyncKeyState` array contains the current Keyboard state on Windows targets. +This array will internally be read by the [`GetAsyncKeyState()`](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getasynckeystate) function of Windows. + +Although the gafAsyncKeyState array is exported by the win32kbase.sys kernel module it is only properly mapped into user mode processes. +Therefor the Keyboard will by default find the winlogon.exe or wininit.exe process and use it as a proxy to read the data. 
+ +# Examples: + +``` +use std::{thread, time}; + +use memflow::mem::{PhysicalMemory, VirtualTranslate2}; +use memflow::os::{Keyboard, KeyboardState}; +use memflow_win32::win32::{Win32Kernel, Win32Keyboard}; + +fn test(kernel: &mut Win32Kernel) { + let mut kbd = Win32Keyboard::with_kernel_ref(kernel).unwrap(); + + loop { + let kbs = kbd.state().unwrap(); + println!("space down: {:?}", kbs.is_down(0x20)); // VK_SPACE + thread::sleep(time::Duration::from_millis(1000)); + } +} +``` +*/ +use super::{Win32Kernel, Win32ProcessInfo, Win32VirtualTranslate}; + +use memflow::cglue::*; +use memflow::error::PartialResultExt; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::mem::{MemoryView, PhysicalMemory, VirtualDma, VirtualTranslate2}; +use memflow::os::keyboard::*; +use memflow::prelude::{ExportInfo, ModuleInfo, Os, Pid, Process}; +use memflow::types::{umem, Address}; + +#[cfg(feature = "plugins")] +use memflow::cglue; + +use log::debug; +use std::convert::TryInto; + +#[cfg(feature = "plugins")] +cglue_impl_group!(Win32Keyboard, IntoKeyboard); + +/// Interface for accessing the target's keyboard state. 
+#[derive(Clone, Debug)] +pub struct Win32Keyboard { + pub virt_mem: T, + key_state_addr: Address, +} + +impl + Win32Keyboard> +{ + pub fn with_kernel(mut kernel: Win32Kernel) -> Result { + let (user_process_info, key_state_addr) = Self::find_keystate(&mut kernel)?; + + let (phys_mem, vat) = kernel.virt_mem.into_inner(); + let virt_mem = VirtualDma::with_vat( + phys_mem, + user_process_info.base_info.proc_arch, + user_process_info.translator(), + vat, + ); + + Ok(Self { + virt_mem, + key_state_addr, + }) + } + + /// Consumes this keyboard, returning the underlying memory and vat objects + pub fn into_inner(self) -> (T, V) { + self.virt_mem.into_inner() + } +} + +impl<'a, T: 'static + PhysicalMemory + Clone, V: 'static + VirtualTranslate2 + Clone> + Win32Keyboard, Fwd<&'a mut V>, Win32VirtualTranslate>> +{ + /// Constructs a new keyboard object by borrowing a kernel object. + /// + /// Internally this will create a `VirtualDma` object that also + /// borrows the PhysicalMemory and Vat objects from the kernel. + /// + /// The resulting process object is NOT cloneable due to the mutable borrowing. + /// + /// When u need a cloneable Process u have to use the `::with_kernel` function + /// which will move the kernel object. 
+ pub fn with_kernel_ref(kernel: &'a mut Win32Kernel) -> Result { + let (user_process_info, key_state_addr) = Self::find_keystate(kernel)?; + + let (phys_mem, vat) = kernel.virt_mem.mem_vat_pair(); + let virt_mem = VirtualDma::with_vat( + phys_mem.forward_mut(), + user_process_info.base_info.proc_arch, + user_process_info.translator(), + vat.forward_mut(), + ); + + Ok(Self { + virt_mem, + key_state_addr, + }) + } +} + +impl Win32Keyboard { + fn find_keystate< + P: 'static + PhysicalMemory + Clone, + V: 'static + VirtualTranslate2 + Clone, + >( + kernel: &mut Win32Kernel, + ) -> Result<(Win32ProcessInfo, Address)> { + let win32kbase_module_info = kernel.module_by_name("win32kbase.sys")?; + debug!("found win32kbase.sys: {:?}", win32kbase_module_info); + + let procs = kernel.process_info_list()?; + + let gaf = procs + .iter() + .filter(|p| { + p.name.as_ref() == "winlogon.exe" + || p.name.as_ref() == "explorer.exe" + || p.name.as_ref() == "taskhostw.exe" + || p.name.as_ref() == "smartscreen.exe" + || p.name.as_ref() == "dwm.exe" + }) + .find_map(|p| Self::find_in_user_process(kernel, &win32kbase_module_info, p.pid).ok()) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound) + .log_info("unable to find any proxy process that contains gafAsyncKeyState") + })?; + + Ok((gaf.0, gaf.1)) + } + + fn find_in_user_process< + P: 'static + PhysicalMemory + Clone, + V: 'static + VirtualTranslate2 + Clone, + >( + kernel: &mut Win32Kernel, + win32kbase_module_info: &ModuleInfo, + pid: Pid, + ) -> Result<(Win32ProcessInfo, Address)> { + let user_process_info = kernel.process_info_by_pid(pid)?; + let user_process_info_win32 = + kernel.process_info_from_base_info(user_process_info.clone())?; + let mut user_process = kernel.process_by_info(user_process_info)?; + debug!( + "trying to find gaf signature in user proxy process `{}`", + user_process.info().name.as_ref() + ); + + // TODO: lazy + let export_addr = Self::find_gaf_pe(&mut user_process.virt_mem, 
win32kbase_module_info) + .or_else(|_| Self::find_gaf_sig(&mut user_process.virt_mem, win32kbase_module_info))?; + debug!( + "found gaf signature in user proxy process `{}` at {:x}", + user_process.info().name.as_ref(), + export_addr + ); + + Ok(( + user_process_info_win32, + win32kbase_module_info.base + export_addr, + )) + } + + fn find_gaf_pe( + virt_mem: &mut impl MemoryView, + win32kbase_module_info: &ModuleInfo, + ) -> Result { + let mut offset = None; + let callback = &mut |export: ExportInfo| { + if export.name.as_ref() == "gafAsyncKeyState" { + offset = Some(export.offset); + false + } else { + true + } + }; + memflow::os::util::module_export_list_callback( + virt_mem, + win32kbase_module_info, + callback.into(), + )?; + offset.ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::ExportNotFound) + .log_info("unable to find gafAsyncKeyState") + }) + } + + // TODO: replace with a custom signature scanning crate + #[cfg(feature = "regex")] + fn find_gaf_sig( + virt_mem: &mut impl MemoryView, + win32kbase_module_info: &ModuleInfo, + ) -> Result { + use ::regex::bytes::*; + + let module_buf = virt_mem + .read_raw( + win32kbase_module_info.base, + win32kbase_module_info.size.try_into().unwrap(), + ) + .data_part()?; + + // 48 8B 05 ? ? ? ? 48 89 81 ? ? 00 00 48 8B 8F + 0x3 + let re = Regex::new("(?-u)\\x48\\x8B\\x05(?s:.)(?s:.)(?s:.)(?s:.)\\x48\\x89\\x81(?s:.)(?s:.)\\x00\\x00\\x48\\x8B\\x8F") + .map_err(|_| Error(ErrorOrigin::OsLayer, ErrorKind::Encoding).log_info("malformed gafAsyncKeyState signature"))?; + let buf_offs = re + .find(module_buf.as_slice()) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::NotFound) + .log_info("unable to find gafAsyncKeyState signature") + })? 
+ .start() + + 0x3; + + // compute rip relative addr + let export_offs = buf_offs as u32 + + u32::from_le_bytes(module_buf[buf_offs..buf_offs + 4].try_into().unwrap()) + + 0x4; + debug!("gafAsyncKeyState export found at: {:x}", export_offs); + Ok(export_offs as umem) + } + + #[cfg(not(feature = "regex"))] + fn find_gaf_sig( + virt_mem: &mut impl MemoryView, + win32kbase_module_info: &ModuleInfo, + ) -> Result { + Err( + Error(ErrorOrigin::OsLayer, ErrorKind::UnsupportedOptionalFeature) + .log_error("signature scanning requires std"), + ) + } +} + +macro_rules! get_ks_byte { + ($vk:expr) => { + $vk * 2 / 8 + }; +} + +macro_rules! get_ks_down_bit { + ($vk:expr) => { + 1 << (($vk % 4) * 2) + }; +} + +macro_rules! is_key_down { + ($ks:expr, $vk:expr) => { + ($ks[get_ks_byte!($vk) as usize] & get_ks_down_bit!($vk)) != 0 + }; +} + +macro_rules! set_key_down { + ($ks:expr, $vk:expr, $down:expr) => { + if $down { + ($ks[get_ks_byte!($vk) as usize] |= get_ks_down_bit!($vk)) + } else { + ($ks[get_ks_byte!($vk) as usize] &= !get_ks_down_bit!($vk)) + } + }; +} + +impl Keyboard for Win32Keyboard { + type KeyboardStateType = Win32KeyboardState; + + /// Reads the gafAsyncKeyState global from the win32kbase.sys kernel module and + /// returns true wether the given key was pressed. + /// This function accepts a valid microsoft virtual keycode. + /// In case of supplying a invalid key this function will just return false cleanly. + /// + /// A list of all Keycodes can be found on the [msdn](https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes). + fn is_down(&mut self, vk: i32) -> bool { + if !(0..=256).contains(&vk) { + false + } else if let Ok(buffer) = self + .virt_mem + .read::<[u8; 256 * 2 / 8]>(self.key_state_addr) + .data_part() + { + is_key_down!(buffer, vk) + } else { + false + } + } + + /// Writes the gafAsyncKeyState global to the win32kbase.sys kernel module. 
+ /// + /// # Remarks: + /// + /// This will not enforce key presses in all applications on Windows. + /// It will only modify calls to GetKeyState / GetAsyncKeyState. + fn set_down(&mut self, vk: i32, down: bool) { + if (0..=256).contains(&vk) { + if let Ok(mut buffer) = self.virt_mem.read::<[u8; 256 * 2 / 8]>(self.key_state_addr) { + set_key_down!(buffer, vk, down); + self.virt_mem.write(self.key_state_addr, &buffer).ok(); + } + } + } + + /// Reads the gafAsyncKeyState global from the win32kbase.sys kernel module. + fn state(&mut self) -> memflow::error::Result { + let buffer: [u8; 256 * 2 / 8] = self.virt_mem.read(self.key_state_addr)?; + Ok(Win32KeyboardState { buffer }) + } +} + +/// Represents the current Keyboardstate. +/// +/// Internally this will hold a 256 * 2 / 8 byte long copy of the gafAsyncKeyState array from the target. +#[derive(Clone)] +pub struct Win32KeyboardState { + buffer: [u8; 256 * 2 / 8], +} + +impl KeyboardState for Win32KeyboardState { + /// Returns true wether the given key was pressed. + /// This function accepts a valid microsoft virtual keycode. + /// In case of supplying a invalid key this function will just return false cleanly. + /// + /// A list of all Keycodes can be found on the [msdn](https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes). 
+ fn is_down(&self, vk: i32) -> bool { + if !(0..=256).contains(&vk) { + false + } else { + is_key_down!(self.buffer, vk) + } + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/module.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/module.rs new file mode 100644 index 0000000..3d0b58e --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/module.rs @@ -0,0 +1,152 @@ +use std::prelude::v1::*; + +use crate::offsets::Win32ArchOffsets; +use crate::win32::VirtualReadUnicodeString; + +use log::trace; + +use memflow::architecture::ArchitectureIdent; +use memflow::error::Result; +use memflow::mem::MemoryView; +use memflow::os::{AddressCallback, ModuleInfo}; +use memflow::types::Address; + +const MAX_ITER_COUNT: usize = 65536; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize))] +pub struct Win32ModuleListInfo { + module_base: Address, + offsets: Win32ArchOffsets, +} + +impl Win32ModuleListInfo { + pub fn with_peb( + mem: &mut impl MemoryView, + env_block: Address, + arch: ArchitectureIdent, + ) -> Result { + let offsets = Win32ArchOffsets::from(arch); + let arch_obj = arch.into(); + + trace!("peb_ldr_offs={:x}", offsets.peb_ldr); + trace!("ldr_list_offs={:x}", offsets.ldr_list); + + let env_block_ldr = mem.read_addr_arch(arch_obj, env_block + offsets.peb_ldr)?; + trace!("peb_ldr={:x}", env_block_ldr); + + let module_base = mem.read_addr_arch(arch_obj, env_block_ldr + offsets.ldr_list)?; + + Self::with_base(module_base, arch) + } + + pub fn with_base(module_base: Address, arch: ArchitectureIdent) -> Result { + trace!("module_base={:x}", module_base); + + let offsets = Win32ArchOffsets::from(arch); + trace!("offsets={:?}", offsets); + + Ok(Win32ModuleListInfo { + module_base, + offsets, + }) + } + + pub fn module_base(&self) -> Address { + self.module_base + } + + pub fn module_entry_list( + &self, + mem: &mut impl AsMut, + arch: ArchitectureIdent, + ) -> 
Result> { + let mut out = vec![]; + self.module_entry_list_callback(mem, arch, (&mut out).into())?; + Ok(out) + } + + pub fn module_entry_list_callback, V: MemoryView>( + &self, + mem: &mut M, + arch: ArchitectureIdent, + mut callback: AddressCallback, + ) -> Result<()> { + let list_start = self.module_base; + let mut list_entry = list_start; + let arch_obj = arch.into(); + for _ in 0..MAX_ITER_COUNT { + if !callback.call(list_entry) { + break; + } + list_entry = mem.as_mut().read_addr_arch(arch_obj, list_entry)?; + // Break on misaligned entry. On NT 4.0 list end is misaligned, maybe it's a flag? + if list_entry.is_null() + || (list_entry.to_umem() & 0b111) != 0 + || list_entry == self.module_base + { + break; + } + } + + Ok(()) + } + + pub fn module_base_from_entry( + &self, + entry: Address, + mem: &mut impl MemoryView, + arch: ArchitectureIdent, + ) -> Result
{ + mem.read_addr_arch(arch.into(), entry + self.offsets.ldr_data_base) + .map_err(From::from) + } + + pub fn module_info_from_entry( + &self, + entry: Address, + parent_eprocess: Address, + mem: &mut impl MemoryView, + arch: ArchitectureIdent, + ) -> Result { + let base = self.module_base_from_entry(entry, mem, arch)?; + let arch_obj = arch.into(); + + trace!("base={:x}", base); + + let mut size = mem + .read_addr_arch(arch_obj, entry + self.offsets.ldr_data_size)? + .to_umem(); + + trace!("size={:x}", size); + + // If size here is messed up, try to parse it from the module pe file + if size < 0x1000 { + if let Ok(new_size) = crate::kernel::ntos::pehelper::try_get_pe_size(mem, base) { + size = new_size; + trace!("pe size={:x}", size); + } + } + + let path = mem + .read_unicode_string(arch_obj, entry + self.offsets.ldr_data_full_name) + .unwrap_or_else(|_| String::new()); + trace!("path={}", path); + + let name = mem + .read_unicode_string(arch_obj, entry + self.offsets.ldr_data_base_name) + .unwrap_or_else(|_| String::new()); + trace!("name={}", name); + + Ok(ModuleInfo { + address: entry, + parent_process: parent_eprocess, + base, + size, + path: path.into(), + name: name.into(), + arch, + }) + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/process.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/process.rs new file mode 100644 index 0000000..5e4e1c5 --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/process.rs @@ -0,0 +1,501 @@ +use std::prelude::v1::*; + +use super::{Win32Kernel, Win32ModuleListInfo}; + +use crate::prelude::MmVadOffsetTable; + +use std::fmt; + +use memflow::mem::virt_translate::*; +use memflow::prelude::v1::{Result, *}; + +// those only required when compiling cglue code +#[cfg(feature = "plugins")] +use memflow::cglue; + +use super::Win32VirtualTranslate; + +/// Exit status of a win32 process +pub type Win32ExitStatus = i32; + +/// Process has not exited yet +pub const 
EXIT_STATUS_STILL_ACTIVE: i32 = 259; + +/// EPROCESS ImageFileName byte length +pub const IMAGE_FILE_NAME_LENGTH: usize = 15; + +#[derive(Debug, Clone)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize))] +pub struct Win32ProcessInfo { + pub base_info: ProcessInfo, + + // general information from eprocess + pub section_base: Address, + pub ethread: Address, + pub wow64: Address, + + // teb + pub teb: Option
, + pub teb_wow64: Option
, + + // peb + pub peb_native: Option
, + pub peb_wow64: Option
, + + // modules + pub module_info_native: Option, + pub module_info_wow64: Option, + + // memory + pub vad_root: Address, +} + +impl Win32ProcessInfo { + pub fn wow64(&self) -> Address { + self.wow64 + } + + pub fn peb(&self) -> Option
{ + if let Some(peb) = self.peb_wow64 { + Some(peb) + } else { + self.peb_native + } + } + + pub fn peb_native(&self) -> Option
{ + self.peb_native + } + + pub fn peb_wow64(&self) -> Option
{ + self.peb_wow64 + } + + /// Return the module list information of process native architecture + /// + /// If the process is a wow64 process, module_info_wow64 is returned, otherwise, module_info_native is + /// returned. + pub fn module_info(&self) -> Option { + if !self.wow64.is_null() { + self.module_info_wow64 + } else { + self.module_info_native + } + } + + pub fn module_info_native(&self) -> Option { + self.module_info_native + } + + pub fn module_info_wow64(&self) -> Option { + self.module_info_wow64 + } + + pub fn translator(&self) -> Win32VirtualTranslate { + Win32VirtualTranslate::new(self.base_info.sys_arch, self.base_info.dtb1) + } +} + +#[cfg(feature = "plugins")] +cglue_impl_group!(Win32Process, ProcessInstance, { VirtualTranslate }); +#[cfg(feature = "plugins")] +cglue_impl_group!(Win32Process, IntoProcessInstance, { VirtualTranslate }); + +pub struct Win32Process { + pub virt_mem: VirtualDma, + pub proc_info: Win32ProcessInfo, + + sysproc_dtb: D, + offset_eproc_exit_status: usize, + mmvad: MmVadOffsetTable, +} + +// TODO: can be removed i think +impl Clone for Win32Process { + fn clone(&self) -> Self { + Self { + virt_mem: self.virt_mem.clone(), + proc_info: self.proc_info.clone(), + sysproc_dtb: self.sysproc_dtb.clone(), + offset_eproc_exit_status: self.offset_eproc_exit_status, + mmvad: self.mmvad, + } + } +} + +impl AsMut> for Win32Process { + fn as_mut(&mut self) -> &mut VirtualDma { + &mut self.virt_mem + } +} + +impl MemoryView + for Win32Process +{ + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()> { + self.virt_mem.read_raw_iter(data) + } + + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()> { + self.virt_mem.write_raw_iter(data) + } + + fn metadata(&self) -> MemoryViewMetadata { + self.virt_mem.metadata() + } +} + +impl VirtualTranslate + for Win32Process +{ + fn virt_to_phys_list( + &mut self, + addrs: &[VtopRange], + out: VirtualTranslationCallback, + out_fail: VirtualTranslationFailCallback, + ) { + 
self.virt_mem.virt_to_phys_list(addrs, out, out_fail) + } +} + +// TODO: implement VAD and rollback to the old bound! +//impl Process for Win32Process { + +impl Process + for Win32Process +{ + /// Retrieves virtual address translator for the process (if applicable) + //fn vat(&mut self) -> Option<&mut Self::VirtualTranslateType>; + + /// Retrieves the state of the process + fn state(&mut self) -> ProcessState { + if let Ok(exit_status) = self.virt_mem.read::( + self.proc_info.base_info.address + self.offset_eproc_exit_status, + ) { + if exit_status == EXIT_STATUS_STILL_ACTIVE { + ProcessState::Alive + } else { + ProcessState::Dead(exit_status) + } + } else { + ProcessState::Unknown + } + } + + /// Changes the dtb this process uses for memory translations + /// + /// # Remarks + /// + /// For memflow-win32 the second parameter should be set to `Address::invalid()`. + fn set_dtb(&mut self, dtb1: Address, _dtb2: Address) -> Result<()> { + self.proc_info.base_info.dtb1 = dtb1; + self.proc_info.base_info.dtb2 = Address::invalid(); + self.virt_mem.set_translator(self.proc_info.translator()); + Ok(()) + } + + /// Walks the process' module list and calls the provided callback for each module + fn module_address_list_callback( + &mut self, + target_arch: Option<&ArchitectureIdent>, + mut callback: ModuleAddressCallback, + ) -> memflow::error::Result<()> { + let infos = [ + ( + self.proc_info.module_info_native, + self.proc_info.base_info.sys_arch, + ), + ( + self.proc_info.module_info_wow64, + self.proc_info.base_info.proc_arch, + ), + ]; + + // Here we end up filtering out module_info_wow64 if it doesn't exist + let iter = infos + .iter() + .filter(|(_, a)| { + if let Some(ta) = target_arch { + a == ta + } else { + true + } + }) + .cloned() + .filter_map(|(info, arch)| info.zip(Some(arch))); + + self.module_address_list_with_infos_callback(iter, &mut callback) + .map_err(From::from) + } + + /// Retrieves a module by its structure address and architecture + /// + /// # 
Arguments + /// * `address` - address where module's information resides in + /// * `architecture` - architecture of the module. Should be either `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch`. + fn module_by_address( + &mut self, + address: Address, + architecture: ArchitectureIdent, + ) -> memflow::error::Result { + let info = if architecture == self.proc_info.base_info.sys_arch { + self.proc_info.module_info_native.as_mut() + } else if architecture == self.proc_info.base_info.proc_arch { + self.proc_info.module_info_wow64.as_mut() + } else { + None + } + .ok_or(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidArchitecture))?; + + info.module_info_from_entry( + address, + self.proc_info.base_info.address, + &mut self.virt_mem, + architecture, + ) + .map_err(From::from) + } + + /// Retrieves address of the primary module structure of the process + /// + /// This will be the module of the executable that is being run, and whose name is stored in + /// _EPROCESS::IMAGE_FILE_NAME + fn primary_module_address(&mut self) -> memflow::error::Result
{ + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)); + let sptr = self as *mut Self; + let callback = &mut |ModuleAddressInfo { address, arch }| { + let s = unsafe { sptr.as_mut() }.unwrap(); + let info = if arch == s.proc_info.base_info.sys_arch { + s.proc_info.module_info_native.as_mut() + } else { + s.proc_info.module_info_wow64.as_mut() + } + .unwrap(); + + if let Ok((_, true)) = info + .module_base_from_entry(address, &mut s.virt_mem, arch) + .map(|b| (b, b == s.proc_info.section_base)) + { + ret = Ok(address); + false + } else { + true + } + }; + let proc_arch = self.proc_info.base_info.proc_arch; + self.module_address_list_callback(Some(&proc_arch), callback.into())?; + ret + } + + fn module_import_list_callback( + &mut self, + info: &ModuleInfo, + callback: ImportCallback, + ) -> Result<()> { + memflow::os::util::module_import_list_callback(&mut self.virt_mem, info, callback) + } + + fn module_export_list_callback( + &mut self, + info: &ModuleInfo, + callback: ExportCallback, + ) -> Result<()> { + memflow::os::util::module_export_list_callback(&mut self.virt_mem, info, callback) + } + + fn module_section_list_callback( + &mut self, + info: &ModuleInfo, + callback: SectionCallback, + ) -> Result<()> { + memflow::os::util::module_section_list_callback(&mut self.virt_mem, info, callback) + } + + /// Retrieves the process info + fn info(&self) -> &ProcessInfo { + &self.proc_info.base_info + } + + fn mapped_mem_range( + &mut self, + gap_size: imem, + start: Address, + end: Address, + out: MemoryRangeCallback, + ) { + fn _walk_vad( + mem: &mut impl MemoryView, + vad_entry: Address, + offsets: &MmVadOffsetTable, + arch: ArchitectureObj, + start: Address, + end: Address, + out: &mut MemoryRangeCallback, + ) { + if vad_entry.is_null() || start == end { + return; + } + + log::trace!("WALK VAD {vad_entry} {start} {end}"); + + let _ = (move || { + // Older versions of windows store starting/ending VPNs as address ranges without + // the high 
parts, as opposed to frame numbers + let pfn_mul = if offsets.starting_vpn_high == offsets.ending_vpn_high { + 1 + } else { + 0x1000 + }; + + // TODO: handle starting/ending vpn high values + + let s = mem.read::(vad_entry + offsets.starting_vpn)? as umem; + let s = Address::from(s * pfn_mul); + let e = mem.read::(vad_entry + offsets.ending_vpn)? as umem; + let e = Address::from(e * pfn_mul); + + let sl = mem.read::(vad_entry + offsets.starting_vpn_high)? as umem; + let el = mem.read::(vad_entry + offsets.ending_vpn_high)? as umem; + + let fl = mem.read::(vad_entry + offsets.u)?; + + // Bits are as follows: + // RXW (maybe) + println!("FL {fl:b} | {}", offsets.protection_bit); + + let _r = fl & (0b1 << offsets.protection_bit); + + let fl = fl >> offsets.protection_bit; + + println!("FL {fl:b}"); + + let fl = fl & !(!0u32 << 5); + + println!("S {s} E {e} | {sl:x} {el:x} | {fl:b} {fl}"); + + if (s >= start && s < end) || (e <= end && e > start) { + let left = mem.read_addr_arch(arch, vad_entry + offsets.vad_node)?; + let right = + mem.read_addr_arch(arch, vad_entry + offsets.vad_node + arch.size_addr())?; + + _walk_vad(mem, left, offsets, arch, start, s, out); + + if !out.call(CTup3( + s, + e.to_umem() - s.to_umem() + pfn_mul, + Default::default(), + )) { + return Result::Ok(()); + } + + _walk_vad(mem, right, offsets, arch, e, end, out); + } + + Result::Ok(()) + })(); + } + + /*let mut gap_remover = memflow::types::util::GapRemover::new(out, gap_size, start, end); + + // Temporarily load up the sysproc dtb into the memory view + self.sysproc_dtb = self.virt_mem.set_translator(self.sysproc_dtb); + + let out = &mut |data| { + gap_remover.push_range(data); + true + }; + + let mut out = out.into(); + + _walk_vad( + &mut self.virt_mem, + self.proc_info.vad_root, + &self.mmvad, + self.proc_info.base_info.sys_arch.into(), + start, + end, + &mut out, + ); + + // Load back the original value + self.sysproc_dtb = self.virt_mem.set_translator(self.sysproc_dtb);*/ + + 
self.virt_mem.virt_page_map_range(gap_size, start, end, out) + } +} + +// TODO: replace the following impls with a dedicated builder +// TODO: add non cloneable thing +impl Win32Process { + pub fn with_kernel(kernel: Win32Kernel, proc_info: Win32ProcessInfo) -> Self { + let mut virt_mem = kernel.virt_mem; + virt_mem.set_proc_arch(proc_info.base_info.proc_arch.into()); + let sysproc_dtb = virt_mem.set_translator(proc_info.translator()); + + Self { + virt_mem, + proc_info, + sysproc_dtb, + mmvad: kernel.offsets.mm_vad(), + offset_eproc_exit_status: kernel.offsets.eproc_exit_status(), + } + } + + /// Consumes this process, returning the underlying memory and vat objects + pub fn into_inner(self) -> (T, V) { + self.virt_mem.into_inner() + } +} + +impl<'a, T: PhysicalMemory, V: VirtualTranslate2> + Win32Process, Fwd<&'a mut V>, Win32VirtualTranslate> +{ + /// Constructs a new process by borrowing a kernel object. + /// + /// Internally this will create a `VirtualDma` object that also + /// borrows the PhysicalMemory and Vat objects from the kernel. + /// + /// The resulting process object is NOT cloneable due to the mutable borrowing. + /// + /// When you need a cloneable Process you have to use the `::with_kernel` function + /// which will move the kernel object. 
+ pub fn with_kernel_ref(kernel: &'a mut Win32Kernel, proc_info: Win32ProcessInfo) -> Self { + let sysproc_dtb = *kernel.virt_mem.translator(); + + let (phys_mem, vat) = kernel.virt_mem.mem_vat_pair(); + let virt_mem = VirtualDma::with_vat( + phys_mem.forward_mut(), + proc_info.base_info.proc_arch, + proc_info.translator(), + vat.forward_mut(), + ); + + Self { + virt_mem, + proc_info, + sysproc_dtb, + mmvad: kernel.offsets.mm_vad(), + offset_eproc_exit_status: kernel.offsets.eproc_exit_status(), + } + } +} + +impl Win32Process { + fn module_address_list_with_infos_callback( + &mut self, + module_infos: impl Iterator, + out: &mut ModuleAddressCallback, + ) -> Result<()> { + for (info, arch) in module_infos { + let callback = &mut |address| out.call(ModuleAddressInfo { address, arch }); + info.module_entry_list_callback(self, arch, callback.into())?; + } + Ok(()) + } +} + +impl fmt::Debug for Win32Process { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.proc_info) + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/unicode_string.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/unicode_string.rs similarity index 56% rename from apex_dma/memflow_lib/memflow-win32/src/win32/unicode_string.rs rename to apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/unicode_string.rs index 3fb2d2c..6f060f1 100644 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/unicode_string.rs +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/unicode_string.rs @@ -1,30 +1,21 @@ use std::prelude::v1::*; -use crate::error::{Error, Result}; - use std::convert::TryInto; use memflow::architecture::{ArchitectureObj, Endianess}; -use memflow::mem::VirtualMemory; +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::mem::MemoryView; use memflow::types::Address; use widestring::U16CString; pub trait VirtualReadUnicodeString { - fn virt_read_unicode_string( - &mut self, - proc_arch: ArchitectureObj, 
- addr: Address, - ) -> Result; + fn read_unicode_string(&mut self, proc_arch: ArchitectureObj, addr: Address) -> Result; } // TODO: split up cpu and proc arch in read_helper.rs -impl<'a, T: VirtualMemory> VirtualReadUnicodeString for T { - fn virt_read_unicode_string( - &mut self, - proc_arch: ArchitectureObj, - addr: Address, - ) -> Result { +impl VirtualReadUnicodeString for T { + fn read_unicode_string(&mut self, proc_arch: ArchitectureObj, addr: Address) -> Result { /* typedef struct _windows_unicode_string32 { uint16_t length; @@ -42,50 +33,51 @@ impl<'a, T: VirtualMemory> VirtualReadUnicodeString for T { // length is always the first entry let mut length = 0u16; - self.virt_read_into(addr, &mut length)?; + self.read_into(addr, &mut length)?; if length == 0 { - return Err(Error::Unicode("unable to read unicode string length")); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::Encoding) + .log_debug("unable to read unicode string length (length is zero)")); } // TODO: chek if length exceeds limit // buffer is either aligned at 4 or 8 let buffer = match proc_arch.bits() { - 64 => self.virt_read_addr64(addr + 8)?, - 32 => self.virt_read_addr32(addr + 4)?, + 64 => self.read_addr64(addr + 8)?, + 32 => self.read_addr32(addr + 4)?, _ => { - return Err(Error::InvalidArchitecture); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidArchitecture)); } }; if buffer.is_null() { - return Err(Error::Unicode("unable to read unicode string length")); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::Encoding) + .log_debug("unable to read unicode string buffer")); } // check if buffer length is mod 2 (utf-16) if length % 2 != 0 { - return Err(Error::Unicode( - "unicode string length is not a multiple of two", - )); + return Err(Error(ErrorOrigin::OsLayer, ErrorKind::Encoding) + .log_debug("unicode string length is not a multiple of two")); } // read buffer let mut content = vec![0; length as usize + 2]; - self.virt_read_raw_into(buffer, &mut content)?; + 
self.read_raw_into(buffer, &mut content)?; content[length as usize] = 0; content[length as usize + 1] = 0; - // TODO: check length % 2 == 0 - let content16 = content .chunks_exact(2) - .map(|b| b[0..2].try_into().map_err(|_| Error::Bounds)) + .map(|b| { + b[0..2] + .try_into() + .map_err(|_| Error(ErrorOrigin::OsLayer, ErrorKind::Encoding)) + }) .filter_map(Result::ok) .map(|b| match proc_arch.endianess() { Endianess::LittleEndian => u16::from_le_bytes(b), Endianess::BigEndian => u16::from_be_bytes(b), }) .collect::>(); - Ok(U16CString::from_vec_with_nul(content16) - .map_err(|_| Error::Encoding)? - .to_string_lossy()) + Ok(U16CString::from_vec_truncate(content16).to_string_lossy()) } } diff --git a/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/vat.rs b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/vat.rs new file mode 100644 index 0000000..7752c3e --- /dev/null +++ b/apex_dma/memflow_lib/memflow-win32/memflow-win32/src/win32/vat.rs @@ -0,0 +1,65 @@ +use memflow::{ + architecture::{arm, x86, ArchitectureIdent, ArchitectureObj}, + cglue::tuple::*, + iter::SplitAtIndex, + mem::{ + MemoryView, PhysicalMemory, VirtualDma, VirtualTranslate2, VirtualTranslate3, + VtopFailureCallback, VtopOutputCallback, + }, + types::{umem, Address}, +}; + +#[derive(Debug, Clone, Copy)] +pub struct Win32VirtualTranslate { + pub sys_arch: ArchitectureObj, + pub dtb: Address, +} + +impl Win32VirtualTranslate { + pub fn new(arch: ArchitectureIdent, dtb: Address) -> Self { + Self { + sys_arch: arch.into(), + dtb, + } + } + + pub fn virt_mem( + self, + mem: T, + vat: V, + proc_arch: ArchitectureObj, + ) -> impl MemoryView { + VirtualDma::with_vat(mem, proc_arch, self, vat) + } +} + +impl VirtualTranslate3 for Win32VirtualTranslate { + fn virt_to_phys_iter< + T: PhysicalMemory + ?Sized, + B: SplitAtIndex, + VI: Iterator>, + >( + &self, + mem: &mut T, + addrs: VI, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + tmp_buf: &mut 
[std::mem::MaybeUninit], + ) { + if let Ok(translator) = x86::new_translator(self.dtb, self.sys_arch) { + translator.virt_to_phys_iter(mem, addrs, out, out_fail, tmp_buf) + } else if let Ok(translator) = arm::new_translator_nonsplit(self.dtb, self.sys_arch) { + translator.virt_to_phys_iter(mem, addrs, out, out_fail, tmp_buf) + } else { + panic!("Invalid architecture"); + } + } + + fn translation_table_id(&self, _address: Address) -> umem { + self.dtb.to_umem().overflowing_shr(12).0 + } + + fn arch(&self) -> ArchitectureObj { + self.sys_arch + } +} diff --git a/apex_dma/memflow_lib/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml b/apex_dma/memflow_lib/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml deleted file mode 100644 index 52e6ab9..0000000 --- a/apex_dma/memflow_lib/memflow-win32/offsets/5_2_3790_X64_82DCF67A38274C9CA99B60B421D2786D2.toml +++ /dev/null @@ -1,21 +0,0 @@ -pdb_guid = '82DCF67A38274C9CA99B60B421D2786D2' -nt_major_version = 5 -nt_minor_version = 2 -nt_build_number = 3790 -arch = 'X64' - -[offsets] -list_blink = 8 -eproc_link = 224 -kproc_dtb = 40 -eproc_pid = 216 -eproc_name = 616 -eproc_peb = 704 -eproc_section_base = 0x128 -eproc_exit_status = 0x024C -eproc_thread_list = 656 -eproc_wow64 = 680 -kthread_teb = 176 -ethread_list_entry = 976 -teb_peb = 96 -teb_peb_x86 = 48 diff --git a/apex_dma/memflow_lib/memflow-win32/src/error.rs b/apex_dma/memflow_lib/memflow-win32/src/error.rs deleted file mode 100644 index 311eff1..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/error.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::prelude::v1::*; - -use std::{convert, fmt, result, str}; - -#[cfg(feature = "std")] -use std::error; - -// forward declare partial result extension from core for easier access -pub use memflow::error::PartialResultExt; - -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -pub enum Error { - /// Generic error type containing a string - Other(&'static str), - /// Out of bounds. 
- /// - /// Catch-all for bounds check errors. - Bounds, - /// Invalid Architecture error. - /// - /// The architecture provided is not a valid argument for the given function. - InvalidArchitecture, - Initialization(&'static str), - SymbolStore(&'static str), - ProcessInfo, - ModuleInfo, - /// memflow core error. - /// - /// Catch-all for memflow core related errors. - Core(memflow::error::Error), - PDB(&'static str), - /// PE error. - /// - /// Catch-all for pe related errors. - PE(pelite::Error), - /// Encoding error. - /// - /// Catch-all for string related errors such as lacking a nul terminator. - Encoding, - /// Unicode error when reading a string from windows. - /// - /// Encapsulates all unicode related reading errors. - Unicode(&'static str), -} - -/// Convert from &str to error -impl convert::From<&'static str> for Error { - fn from(error: &'static str) -> Self { - Error::Other(error) - } -} - -/// Convert from flow_core::Error -impl From for Error { - fn from(error: memflow::error::Error) -> Error { - Error::Core(error) - } -} - -/// Convert from flow_core::PartialError -impl From> for Error { - fn from(_error: memflow::error::PartialError) -> Error { - Error::Core(memflow::error::Error::Partial) - } -} - -/// Convert from pelite::Error -impl From for Error { - fn from(error: pelite::Error) -> Error { - Error::PE(error) - } -} - -/// Convert from str::Utf8Error -impl From for Error { - fn from(_err: str::Utf8Error) -> Error { - Error::Encoding - } -} - -impl Error { - /// Returns a tuple representing the error description and its string value. 
- pub fn to_str_pair(self) -> (&'static str, Option<&'static str>) { - match self { - Error::Other(e) => ("other error", Some(e)), - Error::Bounds => ("out of bounds", None), - Error::InvalidArchitecture => ("invalid architecture", None), - Error::Initialization(e) => ("error during initialization", Some(e)), - Error::SymbolStore(e) => ("error in symbol store", Some(e)), - Error::ProcessInfo => ("error retrieving process info", None), - Error::ModuleInfo => ("error retrieving module info", None), - Error::Core(e) => e.to_str_pair(), - Error::PDB(e) => ("error handling pdb", Some(e)), - Error::PE(e) => ("error handling pe", Some(e.to_str())), - Error::Encoding => ("encoding error", None), - Error::Unicode(e) => ("error reading unicode string", Some(e)), - } - } - - /// Returns a simple string representation of the error. - pub fn to_str(self) -> &'static str { - self.to_str_pair().0 - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (desc, value) = self.to_str_pair(); - - if let Some(value) = value { - write!(f, "{}: {}", desc, value) - } else { - f.write_str(desc) - } - } -} - -#[cfg(feature = "std")] -impl error::Error for Error { - fn description(&self) -> &str { - self.to_str() - } -} - -/// Specialized `Result` type for memflow_win32 errors. 
-pub type Result = result::Result; diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/pehelper.rs b/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/pehelper.rs deleted file mode 100644 index 9394959..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/pehelper.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::prelude::v1::*; - -use crate::error::{Error, Result}; - -use log::{debug, info}; - -use memflow::error::PartialResultExt; -use memflow::mem::VirtualMemory; -use memflow::types::{size, Address}; - -use pelite::{self, PeView}; - -pub fn try_get_pe_size(virt_mem: &mut T, probe_addr: Address) -> Result { - let mut probe_buf = vec![0; size::kb(4)]; - virt_mem.virt_read_raw_into(probe_addr, &mut probe_buf)?; - - let pe_probe = PeView::from_bytes(&probe_buf).map_err(Error::PE)?; - - let opt_header = pe_probe.optional_header(); - let size_of_image = match opt_header { - pelite::Wrap::T32(opt32) => opt32.SizeOfImage, - pelite::Wrap::T64(opt64) => opt64.SizeOfImage, - }; - if size_of_image > 0 { - debug!( - "found pe header for image with a size of {} bytes.", - size_of_image - ); - Ok(size_of_image as usize) - } else { - Err(Error::Initialization("pe size_of_image is zero")) - } -} - -pub fn try_get_pe_image( - virt_mem: &mut T, - probe_addr: Address, -) -> Result> { - let size_of_image = try_get_pe_size(virt_mem, probe_addr)?; - virt_mem - .virt_read_raw(probe_addr, size_of_image) - .data_part() - .map_err(Error::Core) -} - -pub fn try_get_pe_name(virt_mem: &mut T, probe_addr: Address) -> Result { - let image = try_get_pe_image(virt_mem, probe_addr)?; - let pe = PeView::from_bytes(&image).map_err(Error::PE)?; - let name = pe - .exports() - .map_err(|_| Error::Initialization("unable to get exports"))? - .dll_name() - .map_err(|_| Error::Initialization("unable to get dll name"))? 
- .to_str()?; - info!("try_get_pe_name: found pe header for {}", name); - Ok(name.to_string()) -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x64.rs b/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x64.rs deleted file mode 100644 index b7935cf..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/ntos/x64.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::prelude::v1::*; - -use super::pehelper; -use crate::error::{Error, Result}; -use crate::kernel::StartBlock; - -use log::{debug, trace}; - -use memflow::architecture::x86::x64; -use memflow::error::PartialResultExt; -use memflow::iter::PageChunks; -use memflow::mem::VirtualMemory; -use memflow::types::{size, Address}; - -use dataview::Pod; -use pelite::image::IMAGE_DOS_HEADER; - -pub fn find_with_va_hint( - virt_mem: &mut T, - start_block: &StartBlock, -) -> Result<(Address, usize)> { - debug!( - "x64::find_with_va_hint: trying to find ntoskrnl.exe with va hint at {:x}", - start_block.kernel_hint.as_u64() - ); - - // va was found previously - let mut va_base = start_block.kernel_hint.as_u64() & !0x0001_ffff; - while va_base + size::mb(16) as u64 > start_block.kernel_hint.as_u64() { - trace!("x64::find_with_va_hint: probing at {:x}", va_base); - - match find_with_va(virt_mem, va_base) { - Ok(a) => { - let addr = Address::from(a); - let size_of_image = pehelper::try_get_pe_size(virt_mem, addr)?; - return Ok((addr, size_of_image)); - } - Err(e) => trace!("x64::find_with_va_hint: probe error {:?}", e), - } - - va_base -= size::mb(2) as u64; - } - - Err(Error::Initialization( - "x64::find_with_va_hint: unable to locate ntoskrnl.exe via va hint", - )) -} - -fn find_with_va(virt_mem: &mut T, va_base: u64) -> Result { - let mut buf = vec![0; size::mb(2)]; - virt_mem - .virt_read_raw_into(Address::from(va_base), &mut buf) - .data_part()?; - - buf.chunks_exact(x64::ARCH.page_size()) - .enumerate() - .map(|(i, c)| { - let view = Pod::as_data_view(&c[..]); - (i, c, view.copy::(0)) // TODO: potential 
endian mismatch - }) - .filter(|(_, _, p)| p.e_magic == 0x5a4d) // MZ - .filter(|(_, _, p)| p.e_lfanew <= 0x800) - .inspect(|(i, _, _)| { - trace!( - "x64::find_with_va: found potential header flags at offset {:x}", - i * x64::ARCH.page_size() - ) - }) - .find(|(i, _, _)| { - let probe_addr = Address::from(va_base + (*i as u64) * x64::ARCH.page_size() as u64); - let name = pehelper::try_get_pe_name(virt_mem, probe_addr).unwrap_or_default(); - name == "ntoskrnl.exe" - }) - .map(|(i, _, _)| va_base + i as u64 * x64::ARCH.page_size() as u64) - .ok_or_else(|| Error::Initialization("unable to locate ntoskrnl.exe")) -} - -pub fn find( - virt_mem: &mut T, - start_block: &StartBlock, -) -> Result<(Address, usize)> { - debug!("x64::find: trying to find ntoskrnl.exe with page map",); - - let page_map = virt_mem.virt_page_map_range( - size::mb(2), - (!0u64 - (1u64 << (start_block.arch.address_space_bits() - 1))).into(), - (!0u64).into(), - ); - - match page_map - .into_iter() - .flat_map(|(va, size)| size.page_chunks(va, size::mb(2))) - .filter(|&(_, size)| size == size::mb(2)) - .filter_map(|(va, _)| find_with_va(virt_mem, va.as_u64()).ok()) - .next() - { - Some(a) => { - let addr = Address::from(a); - let size_of_image = pehelper::try_get_pe_size(virt_mem, addr)?; - Ok((addr, size_of_image)) - } - None => Err(Error::Initialization( - "x64::find: unable to locate ntoskrnl.exe with a page map", - )), - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block.rs b/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block.rs deleted file mode 100644 index 5838ae9..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/kernel/start_block.rs +++ /dev/null @@ -1,75 +0,0 @@ -mod x64; -mod x86; -mod x86pae; - -use std::prelude::v1::*; - -use crate::error::{Error, Result}; - -use log::warn; - -use memflow::architecture; -use memflow::architecture::ArchitectureObj; -use memflow::mem::PhysicalMemory; -use memflow::types::{size, Address, PhysicalAddress}; - -// 
PROCESSOR_START_BLOCK -#[derive(Debug, Copy, Clone)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct StartBlock { - pub arch: ArchitectureObj, - pub kernel_hint: Address, - pub dtb: Address, -} - -pub fn find_fallback(mem: &mut T, arch: ArchitectureObj) -> Result { - if arch == architecture::x86::x64::ARCH { - // read low 16mb stub - let mut low16m = vec![0; size::mb(16)]; - mem.phys_read_raw_into(PhysicalAddress::NULL, &mut low16m)?; - - x64::find(&low16m) - } else { - Err(Error::Initialization( - "start_block: fallback not implemented for given arch", - )) - } -} - -// bcdedit /set firstmegabytepolicyuseall -pub fn find(mem: &mut T, arch: Option) -> Result { - if let Some(arch) = arch { - if arch == architecture::x86::x64::ARCH { - // read low 1mb stub - let mut low1m = vec![0; size::mb(1)]; - mem.phys_read_raw_into(PhysicalAddress::NULL, &mut low1m)?; - - // find x64 dtb in low stub < 1M - match x64::find_lowstub(&low1m) { - Ok(d) => { - if d.dtb.as_u64() != 0 { - return Ok(d); - } - } - Err(e) => warn!("x64::find_lowstub() error: {}", e), - } - - find_fallback(mem, arch) - } else if arch == architecture::x86::x32_pae::ARCH { - let mut low16m = vec![0; size::mb(16)]; - mem.phys_read_raw_into(PhysicalAddress::NULL, &mut low16m)?; - x86pae::find(&low16m) - } else if arch == architecture::x86::x32::ARCH { - let mut low16m = vec![0; size::mb(16)]; - mem.phys_read_raw_into(PhysicalAddress::NULL, &mut low16m)?; - x86::find(&low16m) - } else { - Err(Error::InvalidArchitecture) - } - } else { - find(mem, Some(architecture::x86::x64::ARCH)) - .or_else(|_| find(mem, Some(architecture::x86::x32_pae::ARCH))) - .or_else(|_| find(mem, Some(architecture::x86::x32::ARCH))) - .map_err(|_| Error::Initialization("unable to find dtb")) - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/offsets/mod.rs b/apex_dma/memflow_lib/memflow-win32/src/offsets/mod.rs deleted file mode 100644 index 2918310..0000000 --- 
a/apex_dma/memflow_lib/memflow-win32/src/offsets/mod.rs +++ /dev/null @@ -1,330 +0,0 @@ -pub mod builder; -pub use builder::Win32OffsetBuilder; - -#[cfg(feature = "symstore")] -pub mod pdb_struct; -#[cfg(feature = "symstore")] -pub mod symstore; - -pub mod offset_table; -#[doc(hidden)] -pub use offset_table::{Win32OffsetFile, Win32OffsetTable, Win32OffsetsArchitecture}; - -#[cfg(feature = "symstore")] -pub use {pdb_struct::PdbStruct, symstore::*}; - -use std::prelude::v1::*; - -#[cfg(feature = "std")] -use std::{fs::File, io::Read, path::Path}; - -use crate::error::{Error, Result}; -use crate::kernel::Win32GUID; -use memflow::architecture::{self, ArchitectureObj}; - -#[derive(Debug, Copy, Clone)] -#[repr(C)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Win32ArchOffsets { - pub peb_ldr: usize, // _PEB::Ldr - pub ldr_list: usize, // _PEB_LDR_DATA::InLoadOrderModuleList - pub ldr_data_base: usize, // _LDR_DATA_TABLE_ENTRY::DllBase - pub ldr_data_size: usize, // _LDR_DATA_TABLE_ENTRY::SizeOfImage - pub ldr_data_full_name: usize, // _LDR_DATA_TABLE_ENTRY::FullDllName - pub ldr_data_base_name: usize, // _LDR_DATA_TABLE_ENTRY::BaseDllName -} - -pub const X86: Win32ArchOffsets = Win32ArchOffsets { - peb_ldr: 0xc, - ldr_list: 0xc, - ldr_data_base: 0x18, - ldr_data_size: 0x20, - ldr_data_full_name: 0x24, - ldr_data_base_name: 0x2c, -}; - -pub const X64: Win32ArchOffsets = Win32ArchOffsets { - peb_ldr: 0x18, - ldr_list: 0x10, - ldr_data_base: 0x30, - ldr_data_size: 0x40, - ldr_data_full_name: 0x48, - ldr_data_base_name: 0x58, -}; - -impl Win32OffsetsArchitecture { - #[inline] - fn offsets(&self) -> &'static Win32ArchOffsets { - match self { - Win32OffsetsArchitecture::X64 => &X64, - Win32OffsetsArchitecture::X86 => &X86, - Win32OffsetsArchitecture::AArch64 => panic!("Not implemented"), - } - } -} - -impl From for Win32ArchOffsets { - fn from(arch: ArchitectureObj) -> Win32ArchOffsets { - *Win32OffsetsArchitecture::from(arch).offsets() - } -} - 
-#[repr(transparent)] -#[derive(Debug, Clone)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Win32Offsets(pub Win32OffsetTable); - -impl From for Win32Offsets { - fn from(other: Win32OffsetTable) -> Self { - Self { 0: other } - } -} - -impl From for Win32OffsetTable { - fn from(other: Win32Offsets) -> Self { - other.0 - } -} - -impl From for Win32OffsetsArchitecture { - fn from(arch: ArchitectureObj) -> Win32OffsetsArchitecture { - if arch == architecture::x86::x32::ARCH || arch == architecture::x86::x32_pae::ARCH { - Self::X86 - } else if arch == architecture::x86::x64::ARCH { - Self::X64 - } else { - // We do not have AArch64, but that is in the plans... - panic!("Invalid architecture specified") - } - } -} - -impl Win32Offsets { - #[cfg(feature = "symstore")] - pub fn from_pdb>(pdb_path: P) -> Result { - let mut file = File::open(pdb_path) - .map_err(|_| Error::PDB("unable to open user-supplied pdb file"))?; - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer) - .map_err(|_| Error::PDB("unable to read user-supplied pdb file"))?; - Self::from_pdb_slice(&buffer[..]) - } - - #[cfg(feature = "symstore")] - pub fn from_pdb_slice(pdb_slice: &[u8]) -> Result { - let list = PdbStruct::with(pdb_slice, "_LIST_ENTRY") - .map_err(|_| Error::PDB("_LIST_ENTRY not found"))?; - let kproc = PdbStruct::with(pdb_slice, "_KPROCESS") - .map_err(|_| Error::PDB("_KPROCESS not found"))?; - let eproc = PdbStruct::with(pdb_slice, "_EPROCESS") - .map_err(|_| Error::PDB("_EPROCESS not found"))?; - let ethread = - PdbStruct::with(pdb_slice, "_ETHREAD").map_err(|_| Error::PDB("_ETHREAD not found"))?; - let kthread = - PdbStruct::with(pdb_slice, "_KTHREAD").map_err(|_| Error::PDB("_KTHREAD not found"))?; - let teb = PdbStruct::with(pdb_slice, "_TEB").map_err(|_| Error::PDB("_TEB not found"))?; - - let list_blink = list - .find_field("Blink") - .ok_or_else(|| Error::PDB("_LIST_ENTRY::Blink not found"))? 
- .offset as _; - - let eproc_link = eproc - .find_field("ActiveProcessLinks") - .ok_or_else(|| Error::PDB("_EPROCESS::ActiveProcessLinks not found"))? - .offset as _; - - let kproc_dtb = kproc - .find_field("DirectoryTableBase") - .ok_or_else(|| Error::PDB("_KPROCESS::DirectoryTableBase not found"))? - .offset as _; - let eproc_pid = eproc - .find_field("UniqueProcessId") - .ok_or_else(|| Error::PDB("_EPROCESS::UniqueProcessId not found"))? - .offset as _; - let eproc_name = eproc - .find_field("ImageFileName") - .ok_or_else(|| Error::PDB("_EPROCESS::ImageFileName not found"))? - .offset as _; - let eproc_peb = eproc - .find_field("Peb") - .ok_or_else(|| Error::PDB("_EPROCESS::Peb not found"))? - .offset as _; - let eproc_section_base = eproc - .find_field("SectionBaseAddress") - .ok_or_else(|| Error::PDB("_EPROCESS::SectionBaseAddress not found"))? - .offset as _; - let eproc_exit_status = eproc - .find_field("ExitStatus") - .ok_or_else(|| Error::PDB("_EPROCESS::ExitStatus not found"))? - .offset as _; - let eproc_thread_list = eproc - .find_field("ThreadListHead") - .ok_or_else(|| Error::PDB("_EPROCESS::ThreadListHead not found"))? - .offset as _; - - // windows 10 uses an uppercase W whereas older windows versions (windows 7) uses a lowercase w - let eproc_wow64 = match eproc - .find_field("WoW64Process") - .or_else(|| eproc.find_field("Wow64Process")) - { - Some(f) => f.offset as _, - None => 0, - }; - - // threads - let kthread_teb = kthread - .find_field("Teb") - .ok_or_else(|| Error::PDB("_KTHREAD::Teb not found"))? - .offset as _; - let ethread_list_entry = ethread - .find_field("ThreadListEntry") - .ok_or_else(|| Error::PDB("_ETHREAD::ThreadListEntry not found"))? - .offset as _; - let teb_peb = teb - .find_field("ProcessEnvironmentBlock") - .ok_or_else(|| Error::PDB("_TEB::ProcessEnvironmentBlock not found"))? 
- .offset as _; - let teb_peb_x86 = if let Ok(teb32) = - PdbStruct::with(pdb_slice, "_TEB32").map_err(|_| Error::PDB("_TEB32 not found")) - { - teb32 - .find_field("ProcessEnvironmentBlock") - .ok_or_else(|| Error::PDB("_TEB32::ProcessEnvironmentBlock not found"))? - .offset as _ - } else { - 0 - }; - - Ok(Self { - 0: Win32OffsetTable { - list_blink, - eproc_link, - - kproc_dtb, - - eproc_pid, - eproc_name, - eproc_peb, - eproc_section_base, - eproc_exit_status, - eproc_thread_list, - eproc_wow64, - - kthread_teb, - ethread_list_entry, - teb_peb, - teb_peb_x86, - }, - }) - } - - /// _LIST_ENTRY::Blink offset - pub fn list_blink(&self) -> usize { - self.0.list_blink as usize - } - /// _LIST_ENTRY::Flink offset - pub fn eproc_link(&self) -> usize { - self.0.eproc_link as usize - } - - /// _KPROCESS::DirectoryTableBase offset - /// Exists since version 3.10 - pub fn kproc_dtb(&self) -> usize { - self.0.kproc_dtb as usize - } - /// _EPROCESS::UniqueProcessId offset - /// Exists since version 3.10 - pub fn eproc_pid(&self) -> usize { - self.0.eproc_pid as usize - } - /// _EPROCESS::ImageFileName offset - /// Exists since version 3.10 - pub fn eproc_name(&self) -> usize { - self.0.eproc_name as usize - } - /// _EPROCESS::Peb offset - /// Exists since version 5.10 - pub fn eproc_peb(&self) -> usize { - self.0.eproc_peb as usize - } - /// _EPROCESS::SectionBaseAddress offset - /// Exists since version 3.10 - pub fn eproc_section_base(&self) -> usize { - self.0.eproc_section_base as usize - } - /// _EPROCESS::ExitStatus offset - /// Exists since version 3.10 - pub fn eproc_exit_status(&self) -> usize { - self.0.eproc_exit_status as usize - } - /// _EPROCESS::ThreadListHead offset - /// Exists since version 5.10 - pub fn eproc_thread_list(&self) -> usize { - self.0.eproc_thread_list as usize - } - /// _EPROCESS::WoW64Process offset - /// Exists since version 5.0 - pub fn eproc_wow64(&self) -> usize { - self.0.eproc_wow64 as usize - } - - /// _KTHREAD::Teb offset - /// Exists 
since version 6.2 - pub fn kthread_teb(&self) -> usize { - self.0.kthread_teb as usize - } - /// _ETHREAD::ThreadListEntry offset - /// Exists since version 6.2 - pub fn ethread_list_entry(&self) -> usize { - self.0.ethread_list_entry as usize - } - /// _TEB::ProcessEnvironmentBlock offset - /// Exists since version x.x - pub fn teb_peb(&self) -> usize { - self.0.teb_peb as usize - } - /// _TEB32::ProcessEnvironmentBlock offset - /// Exists since version x.x - pub fn teb_peb_x86(&self) -> usize { - self.0.teb_peb_x86 as usize - } - - pub fn builder() -> Win32OffsetBuilder { - Win32OffsetBuilder::default() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn download_pdb() { - let guid = Win32GUID { - file_name: "ntkrnlmp.pdb".to_string(), - guid: "3844DBB920174967BE7AA4A2C20430FA2".to_string(), - }; - let offsets = Win32Offsets::builder() - .symbol_store(SymbolStore::new().no_cache()) - .guid(guid) - .build() - .unwrap(); - - assert_eq!(offsets.0.list_blink, 8); - assert_eq!(offsets.0.eproc_link, 392); - - assert_eq!(offsets.0.kproc_dtb, 40); - - assert_eq!(offsets.0.eproc_pid, 384); - assert_eq!(offsets.0.eproc_name, 736); - assert_eq!(offsets.0.eproc_peb, 824); - assert_eq!(offsets.0.eproc_thread_list, 776); - assert_eq!(offsets.0.eproc_wow64, 800); - - assert_eq!(offsets.0.kthread_teb, 184); - assert_eq!(offsets.0.ethread_list_entry, 1056); - assert_eq!(offsets.0.teb_peb, 96); - assert_eq!(offsets.0.teb_peb_x86, 48); - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel.rs b/apex_dma/memflow_lib/memflow-win32/src/win32/kernel.rs deleted file mode 100644 index 0bf8a03..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/kernel.rs +++ /dev/null @@ -1,530 +0,0 @@ -use std::prelude::v1::*; - -use super::{ - process::EXIT_STATUS_STILL_ACTIVE, process::IMAGE_FILE_NAME_LENGTH, KernelBuilder, KernelInfo, - Win32ExitStatus, Win32ModuleListInfo, Win32Process, Win32ProcessInfo, Win32VirtualTranslate, -}; - -use crate::error::{Error, 
Result}; -use crate::offsets::Win32Offsets; - -use log::{info, trace}; -use std::fmt; - -use memflow::architecture::x86; -use memflow::mem::{DirectTranslate, PhysicalMemory, VirtualDMA, VirtualMemory, VirtualTranslate}; -use memflow::process::{OperatingSystem, OsProcessInfo, OsProcessModuleInfo, PID}; -use memflow::types::Address; - -use pelite::{self, pe64::exports::Export, PeView}; - -const MAX_ITER_COUNT: usize = 65536; - -#[derive(Clone)] -pub struct Kernel { - pub phys_mem: T, - pub vat: V, - pub offsets: Win32Offsets, - - pub kernel_info: KernelInfo, - pub sysproc_dtb: Address, -} - -impl OperatingSystem for Kernel {} - -impl Kernel { - pub fn new( - mut phys_mem: T, - mut vat: V, - offsets: Win32Offsets, - kernel_info: KernelInfo, - ) -> Self { - // start_block only contains the winload's dtb which might - // be different to the one used in the actual kernel. - // In case of a failure this will fall back to the winload dtb. - let sysproc_dtb = { - let mut reader = VirtualDMA::with_vat( - &mut phys_mem, - kernel_info.start_block.arch, - Win32VirtualTranslate::new( - kernel_info.start_block.arch, - kernel_info.start_block.dtb, - ), - &mut vat, - ); - - if let Ok(dtb) = reader.virt_read_addr_arch( - kernel_info.start_block.arch, - kernel_info.eprocess_base + offsets.kproc_dtb(), - ) { - dtb - } else { - kernel_info.start_block.dtb - } - }; - info!("sysproc_dtb={:x}", sysproc_dtb); - - Self { - phys_mem, - vat, - offsets, - - kernel_info, - sysproc_dtb, - } - } - - /// Consume the self object and return the containing memory connection - pub fn destroy(self) -> T { - self.phys_mem - } - - pub fn eprocess_list(&mut self) -> Result> { - let mut eprocs = Vec::new(); - self.eprocess_list_extend(&mut eprocs)?; - trace!("found {} eprocesses", eprocs.len()); - Ok(eprocs) - } - - pub fn eprocess_list_extend>(&mut self, eprocs: &mut E) -> Result<()> { - // TODO: create a VirtualDMA constructor for kernel_info - let mut reader = VirtualDMA::with_vat( - &mut self.phys_mem, 
- self.kernel_info.start_block.arch, - Win32VirtualTranslate::new(self.kernel_info.start_block.arch, self.sysproc_dtb), - &mut self.vat, - ); - - let list_start = self.kernel_info.eprocess_base + self.offsets.eproc_link(); - let mut list_entry = list_start; - - for _ in 0..MAX_ITER_COUNT { - let eprocess = list_entry - self.offsets.eproc_link(); - trace!("eprocess={}", eprocess); - - // test flink + blink before adding the process - let flink_entry = - reader.virt_read_addr_arch(self.kernel_info.start_block.arch, list_entry)?; - trace!("flink_entry={}", flink_entry); - let blink_entry = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - list_entry + self.offsets.list_blink(), - )?; - trace!("blink_entry={}", blink_entry); - - if flink_entry.is_null() - || blink_entry.is_null() - || flink_entry == list_start - || flink_entry == list_entry - { - break; - } - - trace!("found eprocess {:x}", eprocess); - eprocs.extend(Some(eprocess).into_iter()); - - // continue - list_entry = flink_entry; - } - - Ok(()) - } - - pub fn kernel_process_info(&mut self) -> Result { - // TODO: create a VirtualDMA constructor for kernel_info - let mut reader = VirtualDMA::with_vat( - &mut self.phys_mem, - self.kernel_info.start_block.arch, - Win32VirtualTranslate::new(self.kernel_info.start_block.arch, self.sysproc_dtb), - &mut self.vat, - ); - - // TODO: cache pe globally - // find PsLoadedModuleList - let loaded_module_list = { - let image = - reader.virt_read_raw(self.kernel_info.kernel_base, self.kernel_info.kernel_size)?; - let pe = PeView::from_bytes(&image).map_err(Error::PE)?; - match pe - .get_export_by_name("PsLoadedModuleList") - .map_err(Error::PE)? 
- { - Export::Symbol(s) => self.kernel_info.kernel_base + *s as usize, - Export::Forward(_) => { - return Err(Error::Other( - "PsLoadedModuleList found but it was a forwarded export", - )) - } - } - }; - - let kernel_modules = - reader.virt_read_addr_arch(self.kernel_info.start_block.arch, loaded_module_list)?; - - Ok(Win32ProcessInfo { - address: self.kernel_info.kernel_base, - - pid: 0, - name: "ntoskrnl.exe".to_string(), - dtb: self.sysproc_dtb, - section_base: Address::NULL, // TODO: see below - exit_status: EXIT_STATUS_STILL_ACTIVE, - ethread: Address::NULL, // TODO: see below - wow64: Address::NULL, - - teb: None, - teb_wow64: None, - - peb_native: Address::NULL, - peb_wow64: None, - - module_info_native: Win32ModuleListInfo::with_base( - kernel_modules, - self.kernel_info.start_block.arch, - )?, - module_info_wow64: None, - - sys_arch: self.kernel_info.start_block.arch, - proc_arch: self.kernel_info.start_block.arch, - }) - } - - pub fn process_info_from_eprocess(&mut self, eprocess: Address) -> Result { - // TODO: create a VirtualDMA constructor for kernel_info - let mut reader = VirtualDMA::with_vat( - &mut self.phys_mem, - self.kernel_info.start_block.arch, - Win32VirtualTranslate::new(self.kernel_info.start_block.arch, self.sysproc_dtb), - &mut self.vat, - ); - - let pid: PID = reader.virt_read(eprocess + self.offsets.eproc_pid())?; - trace!("pid={}", pid); - - let name = - reader.virt_read_cstr(eprocess + self.offsets.eproc_name(), IMAGE_FILE_NAME_LENGTH)?; - trace!("name={}", name); - - let dtb = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + self.offsets.kproc_dtb(), - )?; - trace!("dtb={:x}", dtb); - - let wow64 = if self.offsets.eproc_wow64() == 0 { - trace!("eproc_wow64=null; skipping wow64 detection"); - Address::null() - } else { - trace!( - "eproc_wow64={:x}; trying to read wow64 pointer", - self.offsets.eproc_wow64() - ); - reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + 
self.offsets.eproc_wow64(), - )? - }; - trace!("wow64={:x}", wow64); - - // determine process architecture - let sys_arch = self.kernel_info.start_block.arch; - trace!("sys_arch={:?}", sys_arch); - let proc_arch = match sys_arch.bits() { - 64 => { - if wow64.is_null() { - x86::x64::ARCH - } else { - x86::x32::ARCH - } - } - 32 => x86::x32::ARCH, - _ => return Err(Error::InvalidArchitecture), - }; - trace!("proc_arch={:?}", proc_arch); - - // read native_peb (either the process peb or the peb containing the wow64 helpers) - let native_peb = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + self.offsets.eproc_peb(), - )?; - trace!("native_peb={:x}", native_peb); - - let section_base = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + self.offsets.eproc_section_base(), - )?; - trace!("section_base={:x}", section_base); - - let exit_status: Win32ExitStatus = - reader.virt_read(eprocess + self.offsets.eproc_exit_status())?; - trace!("exit_status={}", exit_status); - - // find first ethread - let ethread = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + self.offsets.eproc_thread_list(), - )? - self.offsets.ethread_list_entry(); - trace!("ethread={:x}", ethread); - - let peb_native = reader - .virt_read_addr_arch( - self.kernel_info.start_block.arch, - eprocess + self.offsets.eproc_peb(), - )? - .non_null() - .ok_or(Error::Other("Could not retrieve peb_native"))?; - - let mut peb_wow64 = None; - - // TODO: does this need to be read with the process ctx? 
- let (teb, teb_wow64) = if self.kernel_info.kernel_winver >= (6, 2).into() { - let teb = reader.virt_read_addr_arch( - self.kernel_info.start_block.arch, - ethread + self.offsets.kthread_teb(), - )?; - - trace!("teb={:x}", teb); - - if !teb.is_null() { - ( - Some(teb), - if wow64.is_null() { - None - } else { - Some(teb + 0x2000) - }, - ) - } else { - (None, None) - } - } else { - (None, None) - }; - - std::mem::drop(reader); - - // construct reader with process dtb - // TODO: can tlb be used here already? - let mut proc_reader = VirtualDMA::with_vat( - &mut self.phys_mem, - proc_arch, - Win32VirtualTranslate::new(self.kernel_info.start_block.arch, dtb), - DirectTranslate::new(), - ); - - if let Some(teb) = teb_wow64 { - // from here on out we are in the process context - // we will be using the process type architecture now - peb_wow64 = proc_reader - .virt_read_addr_arch( - self.kernel_info.start_block.arch, - teb + self.offsets.teb_peb_x86(), - )? - .non_null(); - - trace!("peb_wow64={:?}", peb_wow64); - } - - trace!("peb_native={:?}", peb_native); - - let module_info_native = - Win32ModuleListInfo::with_peb(&mut proc_reader, peb_native, sys_arch)?; - - let module_info_wow64 = peb_wow64 - .map(|peb| Win32ModuleListInfo::with_peb(&mut proc_reader, peb, proc_arch)) - .transpose()?; - - Ok(Win32ProcessInfo { - address: eprocess, - - pid, - name, - dtb, - section_base, - exit_status, - ethread, - wow64, - - teb, - teb_wow64, - - peb_native, - peb_wow64, - - module_info_native, - module_info_wow64, - - sys_arch, - proc_arch, - }) - } - - pub fn process_info_list_extend>( - &mut self, - list: &mut E, - ) -> Result<()> { - let mut vec = Vec::new(); - self.eprocess_list_extend(&mut vec)?; - for eprocess in vec.into_iter() { - if let Ok(prc) = self.process_info_from_eprocess(eprocess) { - list.extend(Some(prc).into_iter()); - } - } - Ok(()) - } - - /// Retrieves a list of `Win32ProcessInfo` structs for all processes - /// that can be found on the target system. 
- pub fn process_info_list(&mut self) -> Result> { - let mut list = Vec::new(); - self.process_info_list_extend(&mut list)?; - Ok(list) - } - - /// Finds a process by it's name and returns the `Win32ProcessInfo` struct. - /// If no process with the specified name can be found this function will return an Error. - pub fn process_info(&mut self, name: &str) -> Result { - let name16 = name[..name.len().min(IMAGE_FILE_NAME_LENGTH - 1)].to_lowercase(); - - let process_info_list = self.process_info_list()?; - let candidates = process_info_list - .iter() - .inspect(|process| trace!("{} {}", process.pid(), process.name())) - .filter(|process| { - // strip process name to IMAGE_FILE_NAME_LENGTH without trailing \0 - process.name().to_lowercase() == name16 - }) - .collect::>(); - - for &candidate in candidates.iter() { - // TODO: properly probe pe header here and check ImageBase - // TODO: this wont work with tlb - trace!("inspecting candidate process: {:?}", candidate); - let mut process = Win32Process::with_kernel_ref(self, candidate.clone()); - if process - .module_list()? - .iter() - .inspect(|&module| trace!("{:x} {}", module.base(), module.name())) - .find(|&module| module.name().to_lowercase() == name.to_lowercase()) - .ok_or_else(|| Error::ModuleInfo) - .is_ok() - { - return Ok(candidate.clone()); - } - } - - Err(Error::ProcessInfo) - } - - /// Finds a process by it's process id and returns the `Win32ProcessInfo` struct. - /// If no process with the specified PID can be found this function will return an Error. - /// - /// If the specified PID is 0 the kernel process is returned. 
- pub fn process_info_pid(&mut self, pid: PID) -> Result { - if pid > 0 { - // regular pid - let process_info_list = self.process_info_list()?; - process_info_list - .into_iter() - .inspect(|process| trace!("{} {}", process.pid(), process.name())) - .find(|process| process.pid == pid) - .ok_or_else(|| Error::Other("pid not found")) - } else { - // kernel pid - self.kernel_process_info() - } - } - - /// Constructs a `Win32Process` struct for the targets kernel by borrowing this kernel instance. - /// - /// This function can be useful for quickly accessing the kernel process. - pub fn kernel_process( - &mut self, - ) -> Result>> { - let proc_info = self.kernel_process_info()?; - Ok(Win32Process::with_kernel_ref(self, proc_info)) - } - - /// Finds a process by its name and constructs a `Win32Process` struct - /// by borrowing this kernel instance. - /// If no process with the specified name can be found this function will return an Error. - /// - /// This function can be useful for quickly accessing a process. - pub fn process( - &mut self, - name: &str, - ) -> Result>> { - let proc_info = self.process_info(name)?; - Ok(Win32Process::with_kernel_ref(self, proc_info)) - } - - /// Finds a process by its process id and constructs a `Win32Process` struct - /// by borrowing this kernel instance. - /// If no process with the specified name can be found this function will return an Error. - /// - /// This function can be useful for quickly accessing a process. - pub fn process_pid( - &mut self, - pid: PID, - ) -> Result>> { - let proc_info = self.process_info_pid(pid)?; - Ok(Win32Process::with_kernel_ref(self, proc_info)) - } - - /// Constructs a `Win32Process` struct by consuming this kernel struct - /// and moving it into the resulting process. - /// - /// If necessary the kernel can be retrieved back by calling `destroy()` on the process after use. - /// - /// This function can be useful for quickly accessing a process. 
- pub fn into_kernel_process( - mut self, - ) -> Result>> { - let proc_info = self.kernel_process_info()?; - Ok(Win32Process::with_kernel(self, proc_info)) - } - - /// Finds a process by its name and constructs a `Win32Process` struct - /// by consuming the kernel struct and moving it into the process. - /// - /// If necessary the kernel can be retrieved back by calling `destroy()` on the process after use. - /// - /// If no process with the specified name can be found this function will return an Error. - /// - /// This function can be useful for quickly accessing a process. - pub fn into_process( - mut self, - name: &str, - ) -> Result>> { - let proc_info = self.process_info(name)?; - Ok(Win32Process::with_kernel(self, proc_info)) - } - - /// Finds a process by its process id and constructs a `Win32Process` struct - /// by consuming the kernel struct and moving it into the process. - /// - /// If necessary the kernel can be retrieved back by calling `destroy()` on the process again. - /// - /// If no process with the specified name can be found this function will return an Error. - /// - /// This function can be useful for quickly accessing a process. - pub fn into_process_pid( - mut self, - pid: PID, - ) -> Result>> { - let proc_info = self.process_info_pid(pid)?; - Ok(Win32Process::with_kernel(self, proc_info)) - } -} - -impl Kernel { - pub fn builder(connector: T) -> KernelBuilder { - KernelBuilder::::new(connector) - } -} - -impl fmt::Debug for Kernel { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.kernel_info) - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/keyboard.rs b/apex_dma/memflow_lib/memflow-win32/src/win32/keyboard.rs deleted file mode 100644 index 4f96709..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/keyboard.rs +++ /dev/null @@ -1,210 +0,0 @@ -/*! -Module for reading a target's keyboard state. - -The `gafAsyncKeyState` array contains the current Keyboard state on Windows targets. 
-This array will internally be read by the [`GetAsyncKeyState()`](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getasynckeystate) function of Windows. - -Although the gafAsyncKeyState array is exported by the win32kbase.sys kernel module it is only properly mapped into user mode processes. -Therefor the Keyboard will by default find the winlogon.exe or wininit.exe process and use it as a proxy to read the data. - -# Examples: - -``` -use std::{thread, time}; - -use memflow::mem::{PhysicalMemory, VirtualTranslate}; -use memflow_win32::win32::{Kernel, Keyboard}; - -fn test(kernel: &mut Kernel) { - let kbd = Keyboard::try_with(kernel).unwrap(); - - loop { - let kbs = kbd.state_with_kernel(kernel).unwrap(); - println!("space down: {:?}", kbs.is_down(win_key_codes::VK_SPACE)); - thread::sleep(time::Duration::from_millis(1000)); - } -} -``` -*/ -use super::{Kernel, Win32Process, Win32ProcessInfo}; -use crate::error::{Error, Result}; - -use std::convert::TryInto; - -use log::debug; - -use memflow::error::PartialResultExt; -use memflow::mem::{PhysicalMemory, VirtualMemory, VirtualTranslate}; -use memflow::process::OsProcessModuleInfo; -use memflow::types::Address; - -use pelite::{self, pe64::exports::Export, PeView}; - -/// Interface for accessing the target's keyboard state. -#[derive(Clone, Debug)] -pub struct Keyboard { - user_process_info: Win32ProcessInfo, - key_state_addr: Address, -} - -/// Represents the current Keyboardstate. -/// -/// Internally this will hold a 256 * 2 / 8 byte long copy of the gafAsyncKeyState array from the target. 
-#[derive(Clone)] -pub struct KeyboardState { - buffer: [u8; 256 * 2 / 8], -} - -impl Keyboard { - pub fn try_with( - kernel: &mut Kernel, - ) -> Result { - let kernel_process_info = kernel.kernel_process_info()?; - debug!("found ntoskrnl.exe: {:?}", kernel_process_info); - - let win32kbase_module_info = { - let mut ntoskrnl_process = Win32Process::with_kernel_ref(kernel, kernel_process_info); - ntoskrnl_process.module_info("win32kbase.sys")? - }; - debug!("found win32kbase.sys: {:?}", win32kbase_module_info); - - let user_process_info = kernel - .process_info("winlogon.exe") - .or_else(|_| kernel.process_info("wininit.exe"))?; - let mut user_process = Win32Process::with_kernel_ref(kernel, user_process_info.clone()); - debug!("found user proxy process: {:?}", user_process); - - // read with user_process dtb - let module_buf = user_process - .virt_mem - .virt_read_raw(win32kbase_module_info.base(), win32kbase_module_info.size()) - .data_part()?; - debug!("fetched {:x} bytes from win32kbase.sys", module_buf.len()); - - // TODO: lazy - let export_addr = - Self::find_gaf_pe(&module_buf).or_else(|_| Self::find_gaf_sig(&module_buf))?; - - Ok(Self { - user_process_info, - key_state_addr: win32kbase_module_info.base() + export_addr, - }) - } - - /// Fetches the gafAsyncKeyState from the given virtual reader. - /// This will use the given virtual memory reader to fetch - /// the gafAsyncKeyState from the win32kbase.sys kernel module. - pub fn state(&self, virt_mem: &mut T) -> Result { - let buffer: [u8; 256 * 2 / 8] = virt_mem.virt_read(self.key_state_addr)?; - Ok(KeyboardState { buffer }) - } - - /// Fetches the kernel's gafAsyncKeyState state with the kernel context. - /// This will use the winlogon.exe or wininit.exe process as a proxy for reading - /// the gafAsyncKeyState from the win32kbase.sys kernel module. 
- pub fn state_with_kernel( - &self, - kernel: &mut Kernel, - ) -> Result { - let mut user_process = - Win32Process::with_kernel_ref(kernel, self.user_process_info.clone()); - self.state(&mut user_process.virt_mem) - } - - /// Fetches the kernel's gafAsyncKeyState state with a processes context. - /// The win32kbase.sys kernel module is accessible with the DTB of a user process - /// so any usermode process can be used to read this memory region. - pub fn state_with_process( - &self, - process: &mut Win32Process, - ) -> Result { - self.state(&mut process.virt_mem) - } - - fn find_gaf_pe(module_buf: &[u8]) -> Result { - let pe = PeView::from_bytes(module_buf).map_err(Error::from)?; - - match pe - .get_export_by_name("gafAsyncKeyState") - .map_err(Error::from)? - { - Export::Symbol(s) => { - debug!("gafAsyncKeyState export found at: {:x}", *s); - Ok(*s as usize) - } - Export::Forward(_) => Err(Error::Other( - "export gafAsyncKeyState found but it is forwarded", - )), - } - } - - // TODO: replace with a custom signature scanning crate - #[cfg(feature = "regex")] - fn find_gaf_sig(module_buf: &[u8]) -> Result { - use ::regex::bytes::*; - - // 48 8B 05 ? ? ? ? 48 89 81 ? ? 00 00 48 8B 8F + 0x3 - let re = Regex::new("(?-u)\\x48\\x8B\\x05(?s:.)(?s:.)(?s:.)(?s:.)\\x48\\x89\\x81(?s:.)(?s:.)\\x00\\x00\\x48\\x8B\\x8F") - .map_err(|_| Error::Other("malformed gafAsyncKeyState signature"))?; - let buf_offs = re - .find(&module_buf[..]) - .ok_or_else(|| Error::Other("unable to find gafAsyncKeyState signature"))? - .start() - + 0x3; - - // compute rip relative addr - let export_offs = buf_offs as u32 - + u32::from_le_bytes(module_buf[buf_offs..buf_offs + 4].try_into().unwrap()) - + 0x4; - debug!("gafAsyncKeyState export found at: {:x}", export_offs); - Ok(export_offs as usize) - } - - #[cfg(not(feature = "regex"))] - fn find_gaf_sig(module_buf: &[u8]) -> Result { - Err(Error::Other("signature scanning requires std")) - } -} - -// #define GET_KS_BYTE(vk) ((vk)*2 / 8) -macro_rules! 
get_ks_byte { - ($vk:expr) => { - $vk * 2 / 8 - }; -} - -// #define GET_KS_DOWN_BIT(vk) (1 << (((vk) % 4) * 2)) -macro_rules! get_ks_down_bit { - ($vk:expr) => { - 1 << (($vk % 4) * 2) - }; -} - -// #define IS_KEY_DOWN(ks, vk) (((ks)[GET_KS_BYTE(vk)] & GET_KS_DOWN_BIT(vk)) ? true : false) -macro_rules! is_key_down { - ($ks:expr, $vk:expr) => { - ($ks[get_ks_byte!($vk) as usize] & get_ks_down_bit!($vk)) != 0 - }; -} - -// #define IS_KEY_LOCKED(ks, vk) (((ks)[GET_KS_BYTE(vk)] & GET_KS_LOCK_BIT(vk)) ? TRUE : FALSE) - -//#define SET_KEY_LOCKED(ks, vk, down) (ks)[GET_KS_BYTE(vk)] = ((down) ? \ -// ((ks)[GET_KS_BYTE(vk)] | GET_KS_LOCK_BIT(vk)) : \ -// ((ks)[GET_KS_BYTE(vk)] & ~GET_KS_LOCK_BIT(vk))) - -impl KeyboardState { - /// Returns true wether the given key was pressed. - /// This function accepts a valid microsoft virtual keycode. - /// - /// A list of all Keycodes can be found on the [msdn](https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes). - /// - /// In case of supplying a invalid key this function will just return false cleanly. 
- pub fn is_down(&self, vk: i32) -> bool { - if vk < 0 || vk > 256 { - false - } else { - is_key_down!(self.buffer, vk) - } - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/module.rs b/apex_dma/memflow_lib/memflow-win32/src/win32/module.rs deleted file mode 100644 index 00f87ef..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/module.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::prelude::v1::*; - -use memflow::process::OsProcessModuleInfo; -use memflow::types::Address; - -#[derive(Debug, Clone)] -pub struct Win32ModuleInfo { - pub peb_entry: Address, - pub parent_eprocess: Address, // parent "reference" - - pub base: Address, // _LDR_DATA_TABLE_ENTRY::DllBase - pub size: usize, // _LDR_DATA_TABLE_ENTRY::SizeOfImage - pub path: String, // _LDR_DATA_TABLE_ENTRY::FullDllName - pub name: String, // _LDR_DATA_TABLE_ENTRY::BaseDllName -} - -impl OsProcessModuleInfo for Win32ModuleInfo { - fn address(&self) -> Address { - self.peb_entry - } - - fn parent_process(&self) -> Address { - self.parent_eprocess - } - - fn base(&self) -> Address { - self.base - } - - fn size(&self) -> usize { - self.size - } - - fn name(&self) -> String { - self.name.clone() - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/process.rs b/apex_dma/memflow_lib/memflow-win32/src/win32/process.rs deleted file mode 100644 index 1bf7e3f..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/process.rs +++ /dev/null @@ -1,394 +0,0 @@ -use std::prelude::v1::*; - -use super::{Kernel, Win32ModuleInfo}; -use crate::error::{Error, Result}; -use crate::offsets::Win32ArchOffsets; -use crate::win32::VirtualReadUnicodeString; - -use log::trace; -use std::fmt; - -use memflow::architecture::ArchitectureObj; -use memflow::mem::{PhysicalMemory, VirtualDMA, VirtualMemory, VirtualTranslate}; -use memflow::process::{OsProcessInfo, OsProcessModuleInfo, PID}; -use memflow::types::Address; - -use super::Win32VirtualTranslate; - -/// Exit status of a win32 process -pub type 
Win32ExitStatus = i32; - -/// Process has not exited yet -pub const EXIT_STATUS_STILL_ACTIVE: i32 = 259; - -/// EPROCESS ImageFileName byte length -pub const IMAGE_FILE_NAME_LENGTH: usize = 15; - -const MAX_ITER_COUNT: usize = 65536; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Win32ModuleListInfo { - module_base: Address, - offsets: Win32ArchOffsets, -} - -impl Win32ModuleListInfo { - pub fn with_peb( - mem: &mut V, - peb: Address, - arch: ArchitectureObj, - ) -> Result { - let offsets = Win32ArchOffsets::from(arch); - - trace!("peb_ldr_offs={:x}", offsets.peb_ldr); - trace!("ldr_list_offs={:x}", offsets.ldr_list); - - let peb_ldr = mem.virt_read_addr_arch(arch, peb + offsets.peb_ldr)?; - trace!("peb_ldr={:x}", peb_ldr); - - let module_base = mem.virt_read_addr_arch(arch, peb_ldr + offsets.ldr_list)?; - - Self::with_base(module_base, arch) - } - - pub fn with_base(module_base: Address, arch: ArchitectureObj) -> Result { - trace!("module_base={:x}", module_base); - - let offsets = Win32ArchOffsets::from(arch); - trace!("offsets={:?}", offsets); - - Ok(Win32ModuleListInfo { - module_base, - offsets, - }) - } - - pub fn module_base(&self) -> Address { - self.module_base - } - - pub fn module_entry_list( - &self, - mem: &mut V, - arch: ArchitectureObj, - ) -> Result> { - let mut list = Vec::new(); - - let list_start = self.module_base; - let mut list_entry = list_start; - for _ in 0..MAX_ITER_COUNT { - list.push(list_entry); - list_entry = mem.virt_read_addr_arch(arch, list_entry)?; - // Break on misaligned entry. On NT 4.0 list end is misaligned, maybe it's a flag? 
- if list_entry.is_null() - || (list_entry.as_u64() & 0b111) != 0 - || list_entry == self.module_base - { - break; - } - } - - Ok(list) - } - - pub fn module_info_from_entry( - &self, - entry: Address, - parent_eprocess: Address, - mem: &mut V, - arch: ArchitectureObj, - ) -> Result { - let base = mem.virt_read_addr_arch(arch, entry + self.offsets.ldr_data_base)?; - - trace!("base={:x}", base); - - let size = mem - .virt_read_addr_arch(arch, entry + self.offsets.ldr_data_size)? - .as_usize(); - - trace!("size={:x}", size); - - let path = mem.virt_read_unicode_string(arch, entry + self.offsets.ldr_data_full_name)?; - trace!("path={}", path); - - let name = mem.virt_read_unicode_string(arch, entry + self.offsets.ldr_data_base_name)?; - trace!("name={}", name); - - Ok(Win32ModuleInfo { - peb_entry: entry, - parent_eprocess, - base, - size, - path, - name, - }) - } -} - -#[derive(Debug, Clone)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Win32ProcessInfo { - pub address: Address, - - // general information from eprocess - pub pid: PID, - pub name: String, - pub dtb: Address, - pub section_base: Address, - pub exit_status: Win32ExitStatus, - pub ethread: Address, - pub wow64: Address, - - // teb - pub teb: Option
, - pub teb_wow64: Option
, - - // peb - pub peb_native: Address, - pub peb_wow64: Option
, - - // modules - pub module_info_native: Win32ModuleListInfo, - pub module_info_wow64: Option, - - // architecture - pub sys_arch: ArchitectureObj, - pub proc_arch: ArchitectureObj, -} - -impl Win32ProcessInfo { - pub fn wow64(&self) -> Address { - self.wow64 - } - - pub fn peb(&self) -> Address { - if let Some(peb) = self.peb_wow64 { - peb - } else { - self.peb_native - } - } - - pub fn peb_native(&self) -> Address { - self.peb_native - } - - pub fn peb_wow64(&self) -> Option
{ - self.peb_wow64 - } - - /// Return the module list information of process native architecture - /// - /// If the process is a wow64 process, module_info_wow64 is returned, otherwise, module_info_native is - /// returned. - pub fn module_info(&self) -> Win32ModuleListInfo { - if !self.wow64.is_null() { - self.module_info_wow64.unwrap() - } else { - self.module_info_native - } - } - - pub fn module_info_native(&self) -> Win32ModuleListInfo { - self.module_info_native - } - - pub fn module_info_wow64(&self) -> Option { - self.module_info_wow64 - } - - pub fn translator(&self) -> Win32VirtualTranslate { - Win32VirtualTranslate::new(self.sys_arch, self.dtb) - } -} - -impl OsProcessInfo for Win32ProcessInfo { - fn address(&self) -> Address { - self.address - } - - fn pid(&self) -> PID { - self.pid - } - - fn name(&self) -> String { - self.name.clone() - } - - fn sys_arch(&self) -> ArchitectureObj { - self.sys_arch - } - - fn proc_arch(&self) -> ArchitectureObj { - self.proc_arch - } -} - -pub struct Win32Process { - pub virt_mem: T, - pub proc_info: Win32ProcessInfo, -} - -// TODO: can be removed i think -impl Clone for Win32Process { - fn clone(&self) -> Self { - Self { - virt_mem: self.virt_mem.clone(), - proc_info: self.proc_info.clone(), - } - } -} - -// TODO: replace the following impls with a dedicated builder -// TODO: add non cloneable thing -impl<'a, T: PhysicalMemory, V: VirtualTranslate> - Win32Process> -{ - pub fn with_kernel(kernel: Kernel, proc_info: Win32ProcessInfo) -> Self { - let virt_mem = VirtualDMA::with_vat( - kernel.phys_mem, - proc_info.proc_arch, - proc_info.translator(), - kernel.vat, - ); - - Self { - virt_mem, - proc_info, - } - } - - /// Consume the self object and returns the containing memory connection - pub fn destroy(self) -> T { - self.virt_mem.destroy() - } -} - -impl<'a, T: PhysicalMemory, V: VirtualTranslate> - Win32Process> -{ - /// Constructs a new process by borrowing a kernel object. 
- /// - /// Internally this will create a `VirtualDMA` object that also - /// borrows the PhysicalMemory and Vat objects from the kernel. - /// - /// The resulting process object is NOT cloneable due to the mutable borrowing. - /// - /// When u need a cloneable Process u have to use the `::with_kernel` function - /// which will move the kernel object. - pub fn with_kernel_ref(kernel: &'a mut Kernel, proc_info: Win32ProcessInfo) -> Self { - let virt_mem = VirtualDMA::with_vat( - &mut kernel.phys_mem, - proc_info.proc_arch, - proc_info.translator(), - &mut kernel.vat, - ); - - Self { - virt_mem, - proc_info, - } - } -} - -impl Win32Process { - fn module_list_with_infos_extend< - E: Extend, - I: Iterator, - >( - &mut self, - module_infos: I, - out: &mut E, - ) -> Result<()> { - for (info, arch) in module_infos { - out.extend( - info.module_entry_list(&mut self.virt_mem, arch)? - .iter() - .filter_map(|&peb| { - info.module_info_from_entry( - peb, - self.proc_info.address, - &mut self.virt_mem, - arch, - ) - .ok() - }), - ); - } - Ok(()) - } - - pub fn module_entry_list(&mut self) -> Result> { - let (info, arch) = if let Some(info_wow64) = self.proc_info.module_info_wow64 { - (info_wow64, self.proc_info.proc_arch) - } else { - (self.proc_info.module_info_native, self.proc_info.sys_arch) - }; - - info.module_entry_list(&mut self.virt_mem, arch) - } - - pub fn module_entry_list_native(&mut self) -> Result> { - let (info, arch) = (self.proc_info.module_info_native, self.proc_info.sys_arch); - info.module_entry_list(&mut self.virt_mem, arch) - } - - pub fn module_entry_list_wow64(&mut self) -> Result> { - let (info, arch) = ( - self.proc_info - .module_info_wow64 - .ok_or(Error::Other("WoW64 module list does not exist"))?, - self.proc_info.proc_arch, - ); - info.module_entry_list(&mut self.virt_mem, arch) - } - - pub fn module_list(&mut self) -> Result> { - let mut vec = Vec::new(); - self.module_list_extend(&mut vec)?; - Ok(vec) - } - - pub fn module_list_extend>(&mut 
self, out: &mut E) -> Result<()> { - let infos = [ - ( - Some(self.proc_info.module_info_native), - self.proc_info.sys_arch, - ), - (self.proc_info.module_info_wow64, self.proc_info.proc_arch), - ]; - - let iter = infos - .iter() - .cloned() - .filter_map(|(info, arch)| info.map(|info| (info, arch))); - - self.module_list_with_infos_extend(iter, out) - } - - pub fn main_module_info(&mut self) -> Result { - let module_list = self.module_list()?; - module_list - .into_iter() - .inspect(|module| trace!("{:x} {}", module.base(), module.name())) - .find(|module| module.base == self.proc_info.section_base) - .ok_or_else(|| Error::ModuleInfo) - } - - pub fn module_info(&mut self, name: &str) -> Result { - let module_list = self.module_list()?; - module_list - .into_iter() - .inspect(|module| trace!("{:x} {}", module.base(), module.name())) - .find(|module| module.name() == name) - .ok_or_else(|| Error::ModuleInfo) - } -} - -impl fmt::Debug for Win32Process { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.proc_info) - } -} diff --git a/apex_dma/memflow_lib/memflow-win32/src/win32/vat.rs b/apex_dma/memflow_lib/memflow-win32/src/win32/vat.rs deleted file mode 100644 index ba0f3ea..0000000 --- a/apex_dma/memflow_lib/memflow-win32/src/win32/vat.rs +++ /dev/null @@ -1,56 +0,0 @@ -use memflow::{ - architecture::{x86, ArchitectureObj, ScopedVirtualTranslate}, - error::Error, - iter::SplitAtIndex, - mem::{PhysicalMemory, VirtualDMA, VirtualMemory, VirtualTranslate}, - types::{Address, PhysicalAddress}, -}; - -#[derive(Debug, Clone, Copy)] -pub struct Win32VirtualTranslate { - pub sys_arch: ArchitectureObj, - pub dtb: Address, -} - -impl Win32VirtualTranslate { - pub fn new(sys_arch: ArchitectureObj, dtb: Address) -> Self { - Self { sys_arch, dtb } - } - - pub fn virt_mem( - self, - mem: T, - vat: V, - proc_arch: ArchitectureObj, - ) -> impl VirtualMemory { - VirtualDMA::with_vat(mem, proc_arch, self, vat) - } -} - -impl ScopedVirtualTranslate for 
Win32VirtualTranslate { - fn virt_to_phys_iter< - T: PhysicalMemory + ?Sized, - B: SplitAtIndex, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, - >( - &self, - mem: &mut T, - addrs: VI, - out: &mut VO, - out_fail: &mut FO, - arena: &memflow::architecture::Bump, - ) { - let translator = x86::new_translator(self.dtb, self.sys_arch).unwrap(); - translator.virt_to_phys_iter(mem, addrs, out, out_fail, arena) - } - - fn translation_table_id(&self, _address: Address) -> usize { - self.dtb.as_u64().overflowing_shr(12).0 as usize - } - - fn arch(&self) -> ArchitectureObj { - self.sys_arch - } -} diff --git a/apex_dma/memflow_lib/memflow/Cargo.toml b/apex_dma/memflow_lib/memflow/Cargo.toml index ea4f23c..c5049e0 100644 --- a/apex_dma/memflow_lib/memflow/Cargo.toml +++ b/apex_dma/memflow_lib/memflow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "memflow" -version = "0.1.5" +version = "0.2.1" authors = ["ko1N ", "Aurimas Blažulionis <0x60@pm.me>"] edition = "2018" description = "core components of the memflow physical memory introspection framework" @@ -8,48 +8,106 @@ documentation = "https://docs.rs/memflow" readme = "../README.md" homepage = "https://memflow.github.io" repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" +license = "MIT" keywords = [ "memflow", "introspection", "memory", "dma" ] categories = [ "memory-management", "os" ] +rust-version = "1.70.0" [badges] maintenance = { status = "actively-developed" } codecov = { repository = "github", branch = "master", service = "github" } [dependencies] -memflow-derive = { version = "0.1", path = "../memflow-derive" } -dataview = { version = "0.1", features = ["derive_pod"] } -log = "0.4" -bitflags = "1.2" -coarsetime = { version = "0.1", optional = true } -smallvec = { version = "1.4", default-features = false } -x86_64 = { version = "0.12", default-features = false } -rand = { version = "0.7", optional = true } -rand_xorshift = { version = "0.2", optional = 
true } -bumpalo = { version = "3.4", features = ["collections"] } -no-std-compat = { version = "0.4", features = ["alloc"] } -itertools = { version = "0.9", default-features = false } -vector-trees = { version = "0.1", git = "https://github.com/h33p/vector-trees", features = ["bumpalo"] } -hashbrown = "0.8" -libloading = { version = "0.6", optional = true } -memmap = { version = "0.7", optional = true } -dirs = { version = "3.0", optional = true } - -serde = { version = "1.0", optional = true, default-features = false, features = ["derive", "alloc"] } -toml = { version = "0.5", optional = true } +memflow-derive = { version = "0.2", path = "../memflow-derive" } +dataview = { version = "^1.0.0", default-features = false } +log = { version = "^0.4.14", default-features = false } +# we keep bitflags on version 1.x due to the various issues with cbindgen macro expansion and other issues +bitflags = "1.3" +coarsetime = { version = "^0.1.20", optional = true } +smallvec = { version = "^1.7.0", default-features = false } +x86_64 = { version = "0.14.8", default-features = false } +rand = { version = "^0.8.4", optional = true } +rand_xorshift = { version = "^0.3", optional = true } +bumpalo = { version = "^3.11.1", features = ["collections"] } +no-std-compat = { version = "^0.4.1", features = ["alloc"] } +itertools = { version = "^0.12.0", default-features = false } +memmap = { version = "^0.7.0", optional = true } +hashbrown = "^0.14" +fixed-slice-vec = "^0.10.0" +cglue = { version = ">=0.2.10", default-features = false } +rangemap = "^1.0" + +# plugins +libloading = { version = "^0.8.1", optional = true } +dirs = { version = "^5.0.1", optional = true } +# enable unsafe_alignment for now, this is UB, but the idea is to fix pelite in upstream +pelite = { version = "=0.9.0", optional = true, default-features = false, features = ["unsafe_alignment"] } +# we pin abi_stable to prevent abi incompatabilities by different versions +abi_stable = { version = "=0.10.3", optional = 
true } +once_cell = { version = "^1.9", optional = true } + +goblin = { version = "0.8", optional = true, features = ["pe32", "pe64", "elf32", "elf64", "mach32", "mach64"] } +serde = { version = "^1.0.133", optional = true, default-features = false, features = ["derive", "alloc"] } +toml = { version = "^0.8", optional = true } [dev-dependencies] -rand = { version = "0.7" } -rand_xorshift = "0.2" +rand = { version = "^0.8.4" } +rand_xorshift = "^0.3" +clap = { version = "^4.0.15", features = ["cargo"] } +simplelog = "^0.12.0" +rayon = "^1.5.1" +colored = "^2.0.0" [features] -default = ["std", "serde_derive", "inventory", "filemap", "memmapfiles"] -trace_mmu = [] # enables debug traces in the mmu (very verbose) +default = ["std", "serde_derive", "plugins", "os_helpers", "filemap", "memmapfiles", "64_bit_mem"] +#trace_mmu = [] # enables debug traces in the mmu (very verbose) dummy_mem = ["rand", "rand_xorshift"] -std = ["coarsetime", "no-std-compat/std"] -collections = [] -alloc = [] -serde_derive = ["serde"] +std = ["coarsetime", "no-std-compat/std", "cglue/std"] +serde_derive = ["serde", "cglue/serde"] memmapfiles = ["toml", "serde_derive"] -inventory = ["libloading", "dirs"] +plugins = ["libloading", "dirs", "goblin", "os_helpers", "abi_stable", "cglue/layout_checks", "log/std", "once_cell"] filemap = ["memmap"] +64_bit_mem = [] +os_helpers = ["goblin", "pelite"] +# Until https://github.com/m4b/goblin/pull/386 is merged +unstable_goblin_lossy_macho = [] +# use 128 bit addressing. +# If 64_bit_mem is also enabled, 64-bit mode takes precedence. +# This is because 128-bit mode is not necessary to date, and u128 is not FFI-safe. 
+128_bit_mem = [] + +[[example]] +name = "read_bench" +path = "examples/read_bench.rs" + +[[example]] +name = "multithreading" +path = "examples/multithreading.rs" + +[[example]] +name = "integration" +path = "examples/integration.rs" + +[[example]] +name = "process_list" +path = "examples/process_list.rs" + +[[example]] +name = "kernel_modules" +path = "examples/kernel_modules.rs" + +[[example]] +name = "kernel_exports" +path = "examples/kernel_exports.rs" + +[[example]] +name = "kernel_maps" +path = "examples/kernel_maps.rs" + +[[example]] +name = "keyboard" +path = "examples/keyboard.rs" + +[[example]] +name = "target_list" +path = "examples/target_list.rs" diff --git a/apex_dma/memflow_lib/memflow/examples/cached_view.rs b/apex_dma/memflow_lib/memflow/examples/cached_view.rs new file mode 100644 index 0000000..c9aa344 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/cached_view.rs @@ -0,0 +1,296 @@ +/*! +This example shows how to create a custom cache validator and use it to cache virtual memory reads on a process. +It also provides an example on how to interact with a cache externally and invalidating values quickly. + +The example simply reads the header of the provided process twice. + +# Usage: +Open process and load the given module with the default dtb. 
+```bash +cargo run --release --example open_process -- -vvv -c kvm --os win32 --process explorer.exe -m KERNEL32.DLL +``` + +Overwrite dtb with a custom one: +```bash +cargo run --release --example cached_view -- -vv -c kvm --os win32 --process explorer.exe -m KERNEL32.DLL +``` +*/ +use ::std::sync::atomic::Ordering; +use std::sync::{ + atomic::{AtomicI32, AtomicU8}, + Arc, +}; + +use clap::*; +use log::{info, Level}; + +use memflow::prelude::v1::*; + +#[repr(u8)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum InvalidationFlags { + Always, + Tick, +} + +struct ExternallyControlledValidator { + validator_next_flags: Arc, + validator_tick_count: Arc, +} + +impl ExternallyControlledValidator { + pub fn new() -> Self { + Self { + validator_next_flags: Arc::new(AtomicU8::new(InvalidationFlags::Always as u8)), + validator_tick_count: Arc::new(AtomicI32::new(0)), + } + } + + pub fn set_next_flags(&mut self, flags: InvalidationFlags) { + self.validator_next_flags + .store(flags as u8, Ordering::SeqCst); + } + + pub fn set_tick_count(&mut self, tick_count: i32) { + self.validator_tick_count + .store(tick_count, Ordering::SeqCst); + } + + pub fn validator(&self) -> CustomValidator { + CustomValidator::new( + self.validator_next_flags.clone(), + self.validator_tick_count.clone(), + ) + } +} + +#[derive(Copy, Clone)] +struct ValidatorSlot { + value: i32, + flags: InvalidationFlags, +} + +#[derive(Clone)] +pub struct CustomValidator { + slots: Vec, + + // The invalidation flags used for the next read or write. + next_flags: Arc, + next_flags_local: InvalidationFlags, + + // last_count is used to quickly invalidate slots without having to + // iterate over all slots and invalidating manually. + last_count: i32, + + // frame count is the externally controlled frame number that will + // invalidate specific caches when it is increased. 
+ tick_count: Arc, + tick_count_local: i32, +} + +impl CustomValidator { + pub fn new(next_flags: Arc, tick_count: Arc) -> Self { + Self { + slots: vec![], + + next_flags, + next_flags_local: InvalidationFlags::Always, + + last_count: 0, + + tick_count, + tick_count_local: -1, + } + } +} + +impl CacheValidator for CustomValidator { + // Create a vector containing all slots with a predefined invalid state. + fn allocate_slots(&mut self, slot_count: usize) { + self.slots.resize( + slot_count, + ValidatorSlot { + value: -1, + flags: InvalidationFlags::Always, + }, + ); + } + + // This function is invoked on every batch of memory operations. + // This simply updates the internal state and reads the Atomic variables for the upcoming validations. + fn update_validity(&mut self) { + self.last_count = self.last_count.wrapping_add(1); + + // SAFETY: next_flags is guaranteed to be of type InvalidationFlags + self.next_flags_local = unsafe { + std::mem::transmute::<_, InvalidationFlags>(self.next_flags.load(Ordering::SeqCst)) + }; + + self.tick_count_local = self.tick_count.load(Ordering::SeqCst); + } + + // This simply returns true or false if the slot is valid or not. + // `last_count` is used here to invalidate slots quickly without requiring to iterate over the entire slot list. + fn is_slot_valid(&self, slot_id: usize) -> bool { + // in case we read / write the same page with different flags we force invalidate this slot instantly + if self.next_flags_local != self.slots[slot_id].flags { + return false; + } + + match self.slots[slot_id].flags { + InvalidationFlags::Always => self.slots[slot_id].value == self.last_count, + InvalidationFlags::Tick => self.slots[slot_id].value == self.tick_count_local, + } + } + + // In case the cache is being updates this function marks the slot as being valid. 
+ fn validate_slot(&mut self, slot_id: usize) { + match self.next_flags_local { + InvalidationFlags::Always => self.slots[slot_id].value = self.last_count, + InvalidationFlags::Tick => self.slots[slot_id].value = self.tick_count_local, + } + + self.slots[slot_id].flags = self.next_flags_local; + } + + // In case a slot has to be freed this function resets it to the default values. + fn invalidate_slot(&mut self, slot_id: usize) { + self.slots[slot_id].value = -1; + self.slots[slot_id].flags = InvalidationFlags::Always; + } +} + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, proc_name, module_name) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let os = inventory.builder().os_chain(chain).build()?; + + let mut process = os + .into_process_by_name(proc_name) + .expect("unable to find process"); + println!("{:?}", process.info()); + + // retrieve module info + let module_info = process + .module_by_name(module_name) + .expect("unable to find module in process"); + println!("{module_info:?}"); + + // create the validator + let mut validator_controller = ExternallyControlledValidator::new(); + let validator = validator_controller.validator(); + + // create CachedView over the processes MemoryView. 
+ let proc_arch = process.info().proc_arch; + let mut cached_process = CachedView::builder(process) + .arch(proc_arch) + .validator(validator) + .cache_size(size::mb(10)) + .build() + .expect("unable to build cache for process"); + + // set the next read to be invalidated only by tick changes + validator_controller.set_next_flags(InvalidationFlags::Tick); + info!("reading module_info.base"); + let _header: [u8; 0x1000] = cached_process + .read(module_info.base) + .data_part() + .expect("unable to read pe header"); + + info!("reading module_info.base from cache"); + let _header: [u8; 0x1000] = cached_process + .read(module_info.base) + .data_part() + .expect("unable to read pe header"); + + // change the frame number to invalidate the cache + validator_controller.set_tick_count(1); + + // read again with the invalidation flags still in place + info!("reading module_info.base again with invalid cache"); + let _header: [u8; 0x1000] = cached_process + .read(module_info.base) + .data_part() + .expect("unable to read pe header"); + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("open_process example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .arg( + Arg::new("process") + .long("process") + .short('p') + .action(ArgAction::Set) + .required(true) + .default_value("explorer.exe"), + ) + .arg( + Arg::new("module") + .long("module") + .short('m') + .action(ArgAction::Set) + .required(true) + .default_value("KERNEL32.DLL"), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str, &str)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, 
+ 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("process").unwrap(), + matches.get_one::("module").unwrap(), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/integration.rs b/apex_dma/memflow_lib/memflow/examples/integration.rs new file mode 100644 index 0000000..8e5b8d2 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/integration.rs @@ -0,0 +1,181 @@ +use memflow::prelude::v1::*; +use memflow::prelude::v1::{ErrorKind, Result}; + +use clap::*; +use log::Level; + +use colored::*; + +static mut HAD_ERROR: bool = false; + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, sysproc, kernel_mods) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + { + println!("Kernel info:"); + let base_info = os.info(); + println!( + "base: {:x} ... {}", + base_info.base, + some_str(&base_info.base.non_null()) + ); + println!( + "size: {:x} ... 
{}", + base_info.size, + bool_str(base_info.size != 0) + ); + println!(); + } + + { + let os_base = os.info().base; + + let mut out = [0u8; 32]; + let phys_mem = as_mut!(os impl PhysicalMemory).expect("no phys mem found"); + phys_mem.phys_read_into(0x1000.into(), &mut out).unwrap(); + println!("Kernel Physical Read: {out:?}"); + + let virt_mem = as_mut!(os impl MemoryView).expect("no virt mem found"); + virt_mem.read_into(os_base, &mut out).unwrap(); + println!("Kernel Virtual Read: {out:?}"); + } + + { + if let Ok(modules) = kernel_modules(&mut os) { + for k in kernel_mods.split(',') { + println!( + "{} ... {}", + k, + some_str(&modules.iter().find(|e| e.name.to_lowercase() == k)) + ); + } + } + println!(); + } + + { + println!("Process List:"); + let prc_list = os.process_info_list()?; + let lsass = prc_list + .iter() + .find(|p| p.name.to_string().to_lowercase() == sysproc); + println!("{} ... {}", &sysproc, some_str(&lsass)); + println!(); + + if let Some(prc) = lsass { + println!("{} info:", prc.name); + println!("pid: {} ... {}", prc.pid, bool_str(prc.pid < 10000)); + } + } + + unsafe { + if HAD_ERROR { + Err(Error(ErrorOrigin::Other, ErrorKind::Unknown) + .log_error("Some errors encountered, not all functionality may be present!")) + } else { + Ok(()) + } + } +} + +fn some_str(r: &Option) -> ColoredString { + bool_str(r.is_some()) +} + +fn ok_str(r: &Result) -> ColoredString { + bool_str(r.is_ok()) +} + +fn bool_str(b: bool) -> ColoredString { + if b { + "ok".green() + } else { + unsafe { HAD_ERROR = true }; + "error".red() + } +} + +fn kernel_modules(kernel: &mut impl Os) -> Result> { + let modules = kernel.module_list().map_err(From::from); + println!("kernel modules ... 
{}", ok_str(&modules)); + modules +} + +fn parse_args() -> ArgMatches { + Command::new("integration example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .arg( + Arg::new("system-proc") + .long("system-proc") + .short('p') + .action(ArgAction::Set) + .default_value("lsass.exe"), + ) + .arg( + Arg::new("kernel-mods") + .long("kernel-mods") + .short('k') + .action(ArgAction::Set) + .default_value("ntoskrnl.exe,hal.dll"), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str, &str)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("system-proc").unwrap(), + matches.get_one::("kernel-mods").unwrap(), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/kernel_exports.rs b/apex_dma/memflow_lib/memflow/examples/kernel_exports.rs new file mode 100644 index 0000000..1250ecb --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/kernel_exports.rs @@ -0,0 +1,81 @@ +/// A simple kernel module list example using memflow +use clap::*; +use 
log::Level; +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + let ntoskrnl = os.primary_module()?; + let exports = os.module_export_list(&ntoskrnl)?; + + // Print export list, formatted + println!("{:>8} {:>32}", "OFFS", "NAME"); + + for e in exports { + println!("{:>8x} {:<32}", e.offset, e.name); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("kernel_exports example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + OsChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow/examples/kernel_maps.rs b/apex_dma/memflow_lib/memflow/examples/kernel_maps.rs new file mode 100644 index 0000000..689fb71 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/kernel_maps.rs @@ -0,0 
+1,85 @@ +/// A simple kernel module list example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + let vt = os + .as_mut_impl_virtualtranslate() + .expect("VirtualTranslate is not implemented for this OS plugin"); + + // Print map list, formatted + println!("{:>16} {:>12} {:<}", "ADDR", "SIZE", "TYPE"); + + let callback = &mut |CTup3(addr, size, pagety)| { + println!("{addr:>16x} {size:>12x} {pagety: ArgMatches { + Command::new("kernel_maps example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + OsChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow/examples/kernel_modules.rs b/apex_dma/memflow_lib/memflow/examples/kernel_modules.rs new file mode 
100644 index 0000000..b982151 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/kernel_modules.rs @@ -0,0 +1,87 @@ +/// A simple kernel module list example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + let module_list = os.module_list()?; + + // Print module list, formatted + println!( + "{:>16} {:>16} {:>8} {:>24} {:<}", + "INTERNAL ADDR", "BASE", "SIZE", "NAME", "PATH" + ); + + for m in module_list { + println!( + "{:>16x} {:>16x} {:>8x} {:>24} {}", + m.address, m.base, m.size, m.name, m.path + ); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("kernel_modules example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + 
OsChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow/examples/keyboard.rs b/apex_dma/memflow_lib/memflow/examples/keyboard.rs new file mode 100644 index 0000000..ddc4c3f --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/keyboard.rs @@ -0,0 +1,84 @@ +/// A simple keyboard example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let os = inventory.builder().os_chain(chain).build()?; + + if !os.check_impl_oskeyboard() { + return Err( + Error(ErrorOrigin::Other, ErrorKind::UnsupportedOptionalFeature) + .log_error("keyboard feature is not implemented for the given os plugin"), + ); + } + + let mut keyboard = into!(os impl OsKeyboard).unwrap().into_keyboard()?; + + loop { + println!("space down: {:?}", keyboard.is_down(0x20)); // VK_SPACE + std::thread::sleep(std::time::Duration::from_millis(1000)); + } +} + +fn parse_args() -> ArgMatches { + Command::new("keyboard example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, 
b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + OsChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow/examples/mem_maps.rs b/apex_dma/memflow_lib/memflow/examples/mem_maps.rs new file mode 100644 index 0000000..441aff9 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/mem_maps.rs @@ -0,0 +1,108 @@ +/// A simple process list example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, proc_name, gap_size) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let os = inventory.builder().os_chain(chain).build()?; + + let mut process = os + .into_process_by_name(proc_name) + .expect("unable to find process"); + println!("found process: {:?}", process.info()); + + let maps = process.mapped_mem_vec(gap_size); + + // Print module list, formatted + println!("{:>11} {:>11} {:<}", "BASE", "SIZE", "FLAGS"); + + for CTup3(a, s, p) in maps { + println!("0x{a:0>8x} 0x{s:0>8x} {p:?}",); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("module_list example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .arg( + Arg::new("process") + .long("process") + .short('p') + .action(ArgAction::Set) + .required(true), + ) + .arg( + Arg::new("gap-size") + .long("gap-size") + .short('g') + .action(ArgAction::Set) + .required(false), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str, imem)> { + let 
log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("process").unwrap(), + matches + .get_one::<&str>("gap-size") + .unwrap_or(&"-1") + .parse() + .unwrap(), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/module_info.rs b/apex_dma/memflow_lib/memflow/examples/module_info.rs new file mode 100644 index 0000000..d456194 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/module_info.rs @@ -0,0 +1,115 @@ +/// A simple process list example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, proc_name, module_name) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let os = inventory.builder().os_chain(chain).build()?; + + let mut process = os + .into_process_by_name(proc_name) + .expect("unable to find process"); + println!("found process: {:?}", process.info()); + + let module = process + .module_by_name(module_name) + .expect("unable to retrieve module"); + + println!( + "{}:\nBase - {:x}\nSize - {:x}\nArch - {:?}\nFilename - {}", + module.name, module.base, module.size, module.arch, module.path + ); + + if let Ok(s) = process + .module_section_list(&module) + .map_err(|e| panic!("{}", e)) + { + println!("Sections:"); + + for s in 
s { + println!("0x{:0>8x} 0x{:0>8x} {}", s.base, s.size, s.name); + } + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("module_list example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .arg( + Arg::new("process") + .long("process") + .short('p') + .action(ArgAction::Set) + .required(true), + ) + .arg( + Arg::new("module") + .long("module") + .short('m') + .action(ArgAction::Set) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str, &str)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("process").unwrap(), + matches.get_one::("module").unwrap(), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/module_list.rs b/apex_dma/memflow_lib/memflow/examples/module_list.rs new file mode 100644 index 0000000..4c17b80 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/module_list.rs @@ -0,0 +1,111 @@ +/*! 
+A simple process list example using memflow + +# Usage: +```bash +cargo run --release --example module_list -- -vvv -c kvm --os win32 --process explorer.exe +``` +*/ +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, proc_name) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let os = inventory.builder().os_chain(chain).build()?; + + let mut process = os + .into_process_by_name(proc_name) + .expect("unable to find process"); + println!("found process: {:?}", process.info()); + + let module_list = process + .module_list() + .expect("unable to retrieve module list"); + + // Print module list, formatted + println!( + "{:>11} {:>11} {:>11} {:>11} {:<}", + "BASE", "SIZE", "MOD ARCH", "NAME", "PATH" + ); + + for m in module_list { + println!( + "0x{:0>8x} 0x{:0>8x} {:^10} {} ({})", + m.base, m.size, m.arch, m.name, m.path + ); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("module_list example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .arg( + Arg::new("process") + .long("process") + .short('p') + .action(ArgAction::Set) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + 
.indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("process").unwrap(), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/multithreading.rs b/apex_dma/memflow_lib/memflow/examples/multithreading.rs new file mode 100644 index 0000000..5f1da8c --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/multithreading.rs @@ -0,0 +1,160 @@ +use std::thread; + +use clap::*; +use log::{info, Level}; + +use memflow::error::{Error, ErrorKind, ErrorOrigin, Result}; +use memflow::os::*; +use memflow::plugins::*; + +// This function shows how the connector can be cloned. +// For each cloned connector a thread is spawned that initializes a seperate OS instance. +pub fn parallel_init( + connector: ConnectorInstanceArcBox<'static>, + inventory: &Inventory, + os_name: &str, + os_args: &OsArgs, +) { + rayon::scope(|s| { + (0..8).map(|_| connector.clone()).for_each(|c| { + s.spawn(move |_| { + inventory + .create_os(os_name, Some(c), Some(os_args)) + .unwrap(); + }) + }) + }); +} + +// This function shows how a kernel can be cloned. +// For each cloned kernel a thread is spawned that will iterate over all processes of the target in parallel. +pub fn parallel_kernels(kernel: OsInstanceArcBox<'static>) { + (0..8) + .map(|_| kernel.clone()) + .map(|mut k| { + thread::spawn(move || { + let _eprocesses = k.process_address_list().unwrap(); + }) + }) + .for_each(|t| t.join().unwrap()); +} + +// This function shows how a process can be cloned. +// For each cloned process a thread is spawned that will iterate over all the modules of this process in parallel. 
+pub fn parallel_processes(kernel: OsInstanceArcBox<'static>) { + let process = kernel.into_process_by_name("wininit.exe").unwrap(); + + (0..8) + .map(|_| process.clone()) + .map(|mut p| { + thread::spawn(move || { + let module_list = p.module_list().unwrap(); + info!("wininit.exe module_list: {}", module_list.len()); + }) + }) + .for_each(|t| t.join().unwrap()); +} + +pub fn main() { + let (conn_name, conn_args, os_name, os_args, log_level) = parse_args().unwrap(); + + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + // create inventory + connector + let inventory = Inventory::scan(); + let connector = inventory + .create_connector(&conn_name, None, Some(&conn_args)) + .unwrap(); + + // parallel test functions + // see each function's implementation for further details + + // showcasing parallel initialization of kernel objects + parallel_init(connector.clone(), &inventory, &os_name, &os_args); + + let kernel = inventory + .create_os(&os_name, Some(connector), Some(&os_args)) + .unwrap(); + + // showcasing parallel process iteration + parallel_kernels(kernel.clone()); + + // showcasing parallel module iteration + parallel_processes(kernel); +} + +fn parse_args() -> Result<(String, ConnectorArgs, String, OsArgs, log::Level)> { + let matches = Command::new("multithreading example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Set) + .required(true), + ) + .arg( + Arg::new("connector-args") + .long("connector-args") + .short('x') + .action(ArgAction::Set) + .default_value(""), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Set) + .required(true), + ) + .arg( + Arg::new("os-args") + .long("os-args") + .short('y') + .action(ArgAction::Set) + 
.default_value(""), + ) + .get_matches(); + + // set log level + let level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + + Ok(( + matches + .get_one::("connector") + .ok_or_else(|| { + Error(ErrorOrigin::Other, ErrorKind::Configuration) + .log_error("failed to parse connector") + })? + .into(), + str::parse(matches.get_one::("connector-args").ok_or_else(|| { + Error(ErrorOrigin::Other, ErrorKind::Configuration) + .log_error("failed to parse connector args") + })?)?, + matches + .get_one::("os") + .ok_or_else(|| { + Error(ErrorOrigin::Other, ErrorKind::Configuration).log_error("failed to parse os") + })? + .into(), + str::parse(matches.get_one::("os-args").ok_or_else(|| { + Error(ErrorOrigin::Other, ErrorKind::Configuration).log_error("failed to parse os args") + })?)?, + level, + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/open_process.rs b/apex_dma/memflow_lib/memflow/examples/open_process.rs new file mode 100644 index 0000000..b9dc6db --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/open_process.rs @@ -0,0 +1,159 @@ +/*! +This example shows how to use a dynamically loaded connector in conjunction +with a dynamically loaded os plugin. This example uses the `Inventory` feature of memflow +to load all the required plugins. Plugins are also chainable by providing the `--os` +and `--connector` arguments multiple times. + +The example showcases how to retrieve extended process info data, +opening the process and getting a list of all modules. + +Additionally the process can be initialized with a (optional) user-provided DTB (Directory Table Base). + +# Usage: +Open process and load the given module with the default dtb. 
+```bash +cargo run --release --example open_process -- -vvv -c kvm --os win32 --process explorer.exe -m KERNEL32.DLL +``` + +Overwrite dtb with a custom one: +```bash +cargo run --release --example open_process -- -vvv -c kvm --os win32 --process explorer.exe -m KERNEL32.DLL --dtb DEADBEEF +``` +*/ +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, proc_name, module_name, dtb) = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + let mut process = if let Some(dtb) = dtb { + // open process with a custom dtb + let mut proc_info = os + .process_info_by_name(proc_name) + .expect("unable to find process"); + proc_info.dtb1 = dtb; + os.into_process_by_info(proc_info) + .expect("unable to open process") + } else { + // use default dtb + os.into_process_by_name(proc_name) + .expect("unable to find process") + }; + println!("{:?}", process.info()); + + // Alternatively the dtb can be modified after the process has been initialized: + if let Some(dtb) = dtb { + process + .set_dtb(dtb, Address::invalid()) + .expect("unable to modify process dtb"); + } + + // retrieve module info + let module_info = process + .module_by_name(module_name) + .expect("unable to find module in process"); + println!("{module_info:?}"); + + // count exports + let export_count = process + .module_export_list(&module_info) + .expect("unable to get exports") + .len(); + println!("Exports: {export_count}"); + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("open_process example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + 
.required(true), + ) + .arg( + Arg::new("process") + .long("process") + .short('p') + .action(ArgAction::Set) + .required(true) + .default_value("explorer.exe"), + ) + .arg( + Arg::new("module") + .long("module") + .short('m') + .action(ArgAction::Set) + .required(true) + .default_value("KERNEL32.DLL"), + ) + .arg( + Arg::new("dtb") + .long("dtb") + .short('d') + .action(ArgAction::Set) + .required(false), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, &str, &str, Option
)> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok(( + OsChain::new(conn_iter, os_iter)?, + matches.get_one::("process").unwrap(), + matches.get_one::("module").unwrap(), + matches + .get_one::("dtb") + .map(|dtb| umem::from_str_radix(dtb, 16).expect("unable to parse dtb as a hex number")) + .map(Address::from), + )) +} diff --git a/apex_dma/memflow_lib/memflow/examples/process_list.rs b/apex_dma/memflow_lib/memflow/examples/process_list.rs new file mode 100644 index 0000000..3af40be --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/process_list.rs @@ -0,0 +1,87 @@ +/// A simple process list example using memflow +use clap::*; +use log::Level; + +use memflow::prelude::v1::*; + +fn main() -> Result<()> { + let matches = parse_args(); + let chain = extract_args(&matches)?; + + // create inventory + os + let inventory = Inventory::scan(); + let mut os = inventory.builder().os_chain(chain).build()?; + + let process_list = os.process_info_list()?; + + // Print process list, formatted + println!( + "{:>5} {:>10} {:>10} {:<}", + "PID", "SYS ARCH", "PROC ARCH", "NAME" + ); + + for p in process_list { + println!( + "{:>5} {:^10} {:^10} {} ({}) ({:?})", + p.pid, p.sys_arch, p.proc_arch, p.name, p.command_line, p.state + ); + } + + Ok(()) +} + +fn parse_args() -> ArgMatches { + Command::new("mfps example") + 
.version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result> { + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + OsChain::new(conn_iter, os_iter) +} diff --git a/apex_dma/memflow_lib/memflow/examples/read_bench.rs b/apex_dma/memflow_lib/memflow/examples/read_bench.rs new file mode 100644 index 0000000..bda0634 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/read_bench.rs @@ -0,0 +1,218 @@ +use std::io::Write; +use std::time::{Duration, Instant}; + +use clap::*; +use log::Level; + +use memflow::cglue::*; +use memflow::error::Result; +use memflow::mem::*; +use memflow::os::{ModuleInfo, Os, Process}; +use memflow::plugins::*; +use memflow::types::*; + +use rand::{Rng, SeedableRng}; +use rand_xorshift::XorShiftRng as CurRng; + +fn rwtest( + mut proc: impl Process + MemoryView, + module: &ModuleInfo, + chunk_sizes: &[usize], + chunk_counts: &[usize], + read_size: usize, +) { + let mut rng = CurRng::seed_from_u64(0); + + println!("Performance bench:"); + print!("{:#7}", "SIZE"); + + 
for i in chunk_counts { + print!(", x{:02x} mb/s, x{:02x} calls/s", *i, *i); + } + + println!(); + + let start = Instant::now(); + let mut ttdur = Duration::new(0, 0); + + for i in chunk_sizes { + print!("0x{:05x}", *i); + for o in chunk_counts { + let mut done_size = 0_usize; + let mut total_dur = Duration::new(0, 0); + let mut calls = 0; + let mut bufs = vec![(vec![0_u8; *i], 0); *o]; + + let base_addr = + rng.gen_range(module.base.to_umem()..(module.base.to_umem() + module.size)); + + // This code will increase the read size for higher number of chunks + // Since optimized vtop should scale very well with chunk sizes. + assert!((i.trailing_zeros() as umem) < usize::MAX as umem); + let chunk_multiplier = *o * (i.trailing_zeros() as usize + 1); + + while done_size < read_size * chunk_multiplier { + for (_, addr) in bufs.iter_mut() { + *addr = base_addr + rng.gen_range(0..0x2000); + } + + let now = Instant::now(); + { + let mut batcher = proc.batcher(); + + for (buf, addr) in bufs.iter_mut() { + batcher.read_raw_into(Address::from(*addr), buf); + } + } + total_dur += now.elapsed(); + done_size += *i * *o; + calls += 1; + } + + ttdur += total_dur; + let total_time = total_dur.as_secs_f64(); + + print!( + ", {:8.2}, {:11.2}", + (done_size / 0x0010_0000) as f64 / total_time, + calls as f64 / total_time + ); + std::io::stdout().flush().expect(""); + } + println!(); + } + + let total_dur = start.elapsed(); + println!( + "Total bench time: {:.2} {:.2}", + total_dur.as_secs_f64(), + ttdur.as_secs_f64() + ); +} + +fn read_bench(mut kernel: OsInstanceArcBox) -> Result<()> { + let proc_list = kernel.process_info_list()?; + let mut rng = CurRng::seed_from_u64(rand::thread_rng().gen_range(0..!0u64)); + + let mut cont_cnt = 0usize; + + loop { + let Ok(mut prc) = + kernel.process_by_info(proc_list[rng.gen_range(0..proc_list.len())].clone()) + else { + cont_cnt += 1; + if cont_cnt.count_ones() == 1 && cont_cnt > 10 { + println!("Warning: could not get proc {cont_cnt} times in a 
row"); + } + continue; + }; + + cont_cnt = 0; + + let mod_list: Vec = prc + .module_list()? + .into_iter() + .filter(|module| module.size > 0x1000) + .collect(); + + if !mod_list.is_empty() { + let tmod = &mod_list[rng.gen_range(0..mod_list.len())]; + println!( + "Found test module {} ({:x}) in {}", + tmod.name, + tmod.size, + prc.info().name, + ); + + let mem_map = prc.mapped_mem_vec(smem::gb(1)); + + println!("Mapped memory map (with up to 1GB gaps):"); + + for CTup3(address, size, pt) in mem_map { + println!("{:x}-{:x} {:?}", address, address + size, pt); + } + + rwtest( + prc, + tmod, + &[0x10000, 0x1000, 0x100, 0x10, 0x8], + &[32, 8, 1], + 0x0010_0000, + ); + + break; + } + } + + Ok(()) +} + +fn main() -> Result<()> { + let matches = parse_args(); + let (chain, log_level) = extract_args(&matches)?; + + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + // create connector + os + let inventory = Inventory::scan(); + + let os = inventory.builder().os_chain(chain).build()?; + + read_bench(os) +} + +fn parse_args() -> ArgMatches { + Command::new("read_bench example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Append) + .required(false), + ) + .arg( + Arg::new("os") + .long("os") + .short('o') + .action(ArgAction::Append) + .required(true), + ) + .get_matches() +} + +fn extract_args(matches: &ArgMatches) -> Result<(OsChain<'_>, log::Level)> { + // set log level + let level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + + let conn_iter = matches + .indices_of("connector") + .zip(matches.get_many::("connector")) + .map(|(a, b)| a.zip(b.map(String::as_str))) 
+ .into_iter() + .flatten(); + + let os_iter = matches + .indices_of("os") + .zip(matches.get_many::("os")) + .map(|(a, b)| a.zip(b.map(String::as_str))) + .into_iter() + .flatten(); + + Ok((OsChain::new(conn_iter, os_iter)?, level)) +} diff --git a/apex_dma/memflow_lib/memflow/examples/target_list.rs b/apex_dma/memflow_lib/memflow/examples/target_list.rs new file mode 100644 index 0000000..0a4f241 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/examples/target_list.rs @@ -0,0 +1,59 @@ +use memflow::plugins::*; + +use clap::*; +use log::Level; + +fn main() { + let connector = parse_args(); + + // create inventory + let inventory = Inventory::scan(); + + // try to get help text + println!( + "Connector help:\n{}", + inventory.connector_help(&connector).unwrap_or_default() + ); + + // try to get target list + let targets = inventory + .connector_target_list(&connector) + .expect("unable to get target list"); + + println!("Targets for connector `{}`:", &connector); + targets.iter().for_each(|t| println!("- {}", t.name)); +} + +fn parse_args() -> String { + let matches = Command::new("multithreading example") + .version(crate_version!()) + .author(crate_authors!()) + .arg(Arg::new("verbose").short('v').action(ArgAction::Count)) + .arg( + Arg::new("connector") + .long("connector") + .short('c') + .action(ArgAction::Set) + .required(true), + ) + .get_matches(); + + // set log level + let log_level = match matches.get_count("verbose") { + 0 => Level::Error, + 1 => Level::Warn, + 2 => Level::Info, + 3 => Level::Debug, + 4 => Level::Trace, + _ => Level::Trace, + }; + simplelog::TermLogger::init( + log_level.to_level_filter(), + simplelog::Config::default(), + simplelog::TerminalMode::Stdout, + simplelog::ColorChoice::Auto, + ) + .unwrap(); + + matches.get_one::("connector").unwrap().into() +} diff --git a/apex_dma/memflow_lib/memflow/src/architecture/arm/aarch64.rs b/apex_dma/memflow_lib/memflow/src/architecture/arm/aarch64.rs new file mode 100644 index 0000000..aa79579 
--- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/architecture/arm/aarch64.rs @@ -0,0 +1,48 @@ +use super::{ + super::{ArchitectureObj, Endianess}, + ArmArchitecture, ArmVirtualTranslate, +}; + +use crate::mem::virt_translate::mmu::ArchMmuDef; + +use crate::types::Address; + +const ARCH_4K_MMU_DEF: ArchMmuDef = ArchMmuDef { + virtual_address_splits: &[9, 9, 9, 9, 12], + valid_final_page_steps: &[2, 3, 4], + address_space_bits: 48, + endianess: Endianess::LittleEndian, + addr_size: 8, + pte_size: 8, + present_bit: |a| a.bit_at(0), + writeable_bit: |a, _| a.bit_at(10), + nx_bit: |a, _| a.bit_at(54), + large_page_bit: |a| !a.bit_at(1), +}; + +pub(super) static ARCH_SPEC: ArmArchitecture = ArmArchitecture { + bits: 64, + mmu: ARCH_4K_MMU_DEF.into_spec(), +}; + +pub static ARCH: ArchitectureObj = &ARCH_SPEC; + +pub fn new_translator(dtb1: Address, dtb2: Address) -> ArmVirtualTranslate { + ArmVirtualTranslate::new(&ARCH_SPEC, dtb1, dtb2) +} + +pub(super) static ARCH_SPEC_16K: ArmArchitecture = ArmArchitecture { + bits: 64, + mmu: ArchMmuDef { + virtual_address_splits: &[1, 11, 11, 11, 14], + valid_final_page_steps: &[3, 4], + ..ARCH_4K_MMU_DEF + } + .into_spec(), +}; + +pub static ARCH_16K: ArchitectureObj = &ARCH_SPEC_16K; + +pub fn new_translator_16k(dtb1: Address, dtb2: Address) -> ArmVirtualTranslate { + ArmVirtualTranslate::new(&ARCH_SPEC_16K, dtb1, dtb2) +} diff --git a/apex_dma/memflow_lib/memflow/src/architecture/arm/mod.rs b/apex_dma/memflow_lib/memflow/src/architecture/arm/mod.rs new file mode 100644 index 0000000..a94f2b1 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/architecture/arm/mod.rs @@ -0,0 +1,167 @@ +pub mod aarch64; + +use super::{Architecture, ArchitectureIdent, ArchitectureObj, Endianess}; + +use crate::mem::virt_translate::{ + mmu::{ + translate_data::{TranslateDataVec, TranslationChunk}, + ArchMmuSpec, MmuTranslationBase, + }, + VirtualTranslate3, VtopFailureCallback, VtopOutputCallback, +}; + +use crate::error::{Error, ErrorKind, 
ErrorOrigin, Result}; +use crate::iter::SplitAtIndex; +use crate::mem::PhysicalMemory; +use crate::types::{size, umem, Address}; +use cglue::tuple::*; + +pub struct ArmArchitecture { + /// Defines how many bits does the native word size have + bits: u8, + /// Defines the underlying MMU used for address translation + mmu: ArchMmuSpec, +} + +impl Architecture for ArmArchitecture { + fn bits(&self) -> u8 { + self.bits + } + + fn endianess(&self) -> Endianess { + self.mmu.def.endianess + } + + fn page_size(&self) -> usize { + self.mmu.page_size_level(1) as usize + } + + fn size_addr(&self) -> usize { + self.mmu.def.addr_size.into() + } + + fn address_space_bits(&self) -> u8 { + self.mmu.def.address_space_bits + } + + fn ident(&self) -> ArchitectureIdent { + ArchitectureIdent::AArch64(size::kb(4)) + } +} + +// TODO: Add granularity +#[derive(Clone, Copy)] +pub struct ArmVirtualTranslate { + arch: &'static ArmArchitecture, + dtb: ArmPageTableBase, +} + +impl ArmVirtualTranslate { + pub fn new(arch: &'static ArmArchitecture, dtb1: Address, dtb2: Address) -> Self { + Self { + arch, + dtb: ArmPageTableBase(dtb1, dtb2), + } + } +} + +#[derive(Clone, Copy, Debug)] +pub struct ArmPageTableBase(Address, Address); + +impl MmuTranslationBase for ArmPageTableBase { + fn get_pt_by_virt_addr(&self, addr: Address) -> Address { + //TODO: handle for Arm 32 + if (addr.to_umem().to_be() & 1) == 1 { + self.1 + } else { + self.0 + } + } + + fn get_pt_by_index(&self, idx: usize) -> (Address, usize) { + if idx < 256 { + (self.0, idx) + } else { + (self.1, idx) + } + } + + fn pt_count(&self) -> usize { + 2 + } + + fn virt_addr_filter( + &self, + spec: &ArchMmuSpec, + addr: CTup3, + work_group: (&mut TranslationChunk, &mut TranslateDataVec), + out_fail: &mut VtopFailureCallback, + ) where + B: SplitAtIndex, + { + spec.virt_addr_filter(addr, work_group, out_fail); + } +} + +impl VirtualTranslate3 for ArmVirtualTranslate { + fn virt_to_phys_iter< + T: PhysicalMemory + ?Sized, + B: SplitAtIndex, 
+ VI: Iterator>, +>( + &self, + mem: &mut T, + addrs: VI, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + tmp_buf: &mut [std::mem::MaybeUninit], + ) { + self.arch + .mmu + .virt_to_phys_iter(mem, self.dtb, addrs, out, out_fail, tmp_buf) + } + + fn translation_table_id(&self, address: Address) -> umem { + self.dtb + .get_pt_by_virt_addr(address) + .to_umem() + .overflowing_shr(11) + .0 + } + + fn arch(&self) -> ArchitectureObj { + self.arch + } +} + +// This lint doesn't make any sense in our use case, since we never leak ARCH_SPECs, and ARCH is +// a static trait object with a consistent address. +fn underlying_arch(arch: ArchitectureObj) -> Option<&'static ArmArchitecture> { + if arch == aarch64::ARCH { + Some(&aarch64::ARCH_SPEC) + } else { + None + } +} + +pub fn new_translator( + dtb1: Address, + dtb2: Address, + arch: ArchitectureObj, +) -> Result { + let arch = + underlying_arch(arch).ok_or(Error(ErrorOrigin::Mmu, ErrorKind::InvalidArchitecture))?; + Ok(ArmVirtualTranslate::new(arch, dtb1, dtb2)) +} + +pub fn new_translator_nonsplit( + dtb: Address, + arch: ArchitectureObj, +) -> Result { + // TODO: Handle 32 bit arm + new_translator(dtb, dtb + size::kb(2), arch) +} + +pub fn is_arm_arch(arch: ArchitectureObj) -> bool { + underlying_arch(arch).is_some() +} diff --git a/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec.rs b/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec.rs deleted file mode 100644 index b9fcffd..0000000 --- a/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec.rs +++ /dev/null @@ -1,526 +0,0 @@ -pub(crate) mod translate_data; - -use crate::error::{Error, Result}; -use crate::iter::{PageChunks, SplitAtIndex}; -use crate::mem::{PhysicalMemory, PhysicalReadData}; -use crate::types::{Address, PageType, PhysicalAddress}; -use std::convert::TryInto; -use translate_data::{TranslateData, TranslateVec, TranslationChunk}; - -use bumpalo::{collections::Vec as BumpVec, Bump}; -use vector_trees::{BVecTreeMap as
BTreeMap, Vector}; - -#[cfg(feature = "trace_mmu")] -macro_rules! vtop_trace { - ( $( $x:expr ),* ) => { - log::trace!( $($x, )* ); - } -} - -#[cfg(not(feature = "trace_mmu"))] -macro_rules! vtop_trace { - ( $( $x:expr ),* ) => {}; -} - -/// The `ArchMMUSpec` structure defines how a real memory management unit should behave when -/// translating virtual memory addresses to physical ones. -/// -/// The core logic of virtual to physical memory translation is practically the same, but different -/// MMUs may have different address space sizes, and thus split the addresses in different ways. -/// -/// For instance, most x86_64 architectures have 4 levels of page mapping, providing 52-bit address -/// space. Virtual address gets split into 4 9-bit regions, and a 12-bit one, the first 4 are used -/// to index the page tables, and the last, 12-bit split is used as an offset to get the final -/// memory address. Meanwhile, x86 with PAE has 3 levels of page mapping, providing 36-bit address -/// space. Virtual address gets split into a 2-bit, 2 9-bit and a 12-bit regions - the last one is -/// also used as an offset from the physical frame. The difference is of level count, and virtual -/// address splits, but the core page table walk stays the same. -/// -/// Our virtual to physical memory ranslation code is the same for both architectures, in fact, it -/// is also the same for the x86 (non-PAE) architecture that has different PTE and pointer sizes. -/// All that differentiates the translation process is the data inside this structure. -#[derive(Debug)] -pub struct ArchMMUSpec { - /// defines the way virtual addresses gets split (the last element - /// being the final physical page offset, and thus treated a bit differently) - pub virtual_address_splits: &'static [u8], - /// defines at which page mapping steps we can return a large page. - /// Steps are indexed from 0, and the list has to be sorted, otherwise the code may fail. 
- pub valid_final_page_steps: &'static [usize], - /// define the address space upper bound (32 for x86, 52 for x86_64) - pub address_space_bits: u8, - /// native pointer size in bytes for the architecture. - pub addr_size: u8, - /// size of an individual page table entry in bytes. - pub pte_size: usize, - /// index of a bit in PTE defining whether the page is present or not. - pub present_bit: u8, - /// index of a bit in PTE defining if the page is writeable. - pub writeable_bit: u8, - /// index of a bit in PTE defining if the page is non-executable. - pub nx_bit: u8, - /// index of a bit in PTE defining if the PTE points to a large page. - pub large_page_bit: u8, -} - -pub trait MMUTranslationBase { - fn get_initial_pt(&self, address: Address) -> Address; - - fn get_pt_by_index(&self, _: usize) -> Address; - - fn pt_count(&self) -> usize; - - fn virt_addr_filter>( - &self, - spec: &ArchMMUSpec, - addr: (Address, B), - data_to_translate: &mut TranslateVec, - out_fail: &mut O, - ); -} - -impl ArchMMUSpec { - /// Mask a page table entry address to retrieve the next page table entry - /// - /// This function uses virtual_address_splits to mask the first bits out in `pte_addr`, but - /// keep everything else until the `address_space_bits` upper bound. - /// - /// # Arguments - /// - /// * `pte_addr` - page table entry address to mask - /// * `step` - the current step in the page walk - /// - /// # Remarks - /// - /// The final step is handled differently, because the final split provides a byte offset to - /// the page, instead of an offset that has to be multiplied by `pte_size`. We do that by - /// subtracting `pte_size` logarithm from the split size. 
- pub fn pte_addr_mask(&self, pte_addr: Address, step: usize) -> u64 { - let max = self.address_space_bits - 1; - let min = self.virtual_address_splits[step] - + if step == self.virtual_address_splits.len() - 1 { - 0 - } else { - self.pte_size.to_le().trailing_zeros() as u8 - }; - let mask = Address::bit_mask(min..max); - vtop_trace!("pte_addr_mask={:b}", mask.as_u64()); - pte_addr.as_u64() & mask.as_u64() - } - - /// Filter out the input virtual address range to be in bounds - /// - /// - /// # Arguments - /// - /// * `(addr, buf)` - an address and buffer pair that gets split and filtered - /// * `valid_out` - output collection that contains valid splits - /// * `fail_out` - the final collection where the function will push rejected ranges to - /// - /// # Remarks - /// - /// This function cuts the input virtual address to be inside range `(-2^address_space_bits; - /// +2^address_space_bits)`. It may result in 2 ranges, and it may have up to 2 failed ranges - pub(crate) fn virt_addr_filter( - &self, - (addr, buf): (Address, B), - valid_out: &mut VO, - fail_out: &mut FO, - ) where - B: SplitAtIndex, - VO: Extend>, - FO: Extend<(Error, Address, B)>, - { - let mut tr_data = TranslateData { addr, buf }; - - let (mut left, reject) = - tr_data.split_inclusive_at(Address::bit_mask(0..(self.addr_size * 8 - 1)).as_usize()); - - if let Some(data) = reject { - fail_out.extend(Some((Error::VirtualTranslate, data.addr, data.buf))); - } - - let virt_bit_range = self.virt_addr_bit_range(0).1; - let virt_range = 1usize << (virt_bit_range - 1); - - let (lower, higher) = left.split_at(virt_range); - - if lower.length() > 0 { - valid_out.extend(Some(lower).into_iter()); - } - - if let Some(mut data) = higher { - let (reject, higher) = data.split_at_rev(virt_range); - - // The upper half has to be all negative (all bits set), so compare the masks to see if - // it is the case. 
- let lhs = Address::bit_mask(virt_bit_range..(self.addr_size * 8 - 1)).as_u64(); - let rhs = higher.addr.as_u64() & lhs; - - if (lhs ^ rhs) != 0 { - return; - } - - if higher.length() > 0 { - valid_out.extend(Some(higher).into_iter()); - } - - if let Some(data) = reject { - fail_out.extend(Some((Error::VirtualTranslate, data.addr, data.buf))); - } - } - } - - fn virt_addr_bit_range(&self, step: usize) -> (u8, u8) { - let max_index_bits = self.virtual_address_splits[step..].iter().sum::(); - let min_index_bits = max_index_bits - self.virtual_address_splits[step]; - (min_index_bits, max_index_bits) - } - - fn virt_addr_to_pte_offset(&self, virt_addr: Address, step: usize) -> u64 { - let (min, max) = self.virt_addr_bit_range(step); - vtop_trace!("virt_addr_bit_range for step {} = ({}, {})", step, min, max); - - let shifted = virt_addr.as_u64() >> min; - let mask = Address::bit_mask(0..(max - min - 1)); - - (shifted & mask.as_u64()) * self.pte_size as u64 - } - - fn virt_addr_to_page_offset(&self, virt_addr: Address, step: usize) -> u64 { - let max = self.virt_addr_bit_range(step).1; - virt_addr.as_u64() & Address::bit_mask(0..(max - 1)).as_u64() - } - - /// Return the number of splits of virtual addresses - /// - /// The returned value will be one more than the number of page table levels - pub fn split_count(&self) -> usize { - self.virtual_address_splits.len() - } - - /// Calculate the size of the page table entry leaf in bytes - /// - /// This will return the number of page table entries at a specific step multiplied by the - /// `pte_size`. Usually this will be an entire page, but in certain cases, like the highest - /// mapping level of x86 with PAE, it will be less. 
- /// - /// # Arguments - /// - /// * `step` - the current step in the page walk - pub fn pt_leaf_size(&self, step: usize) -> usize { - let (min, max) = self.virt_addr_bit_range(step); - (1 << (max - min)) * self.pte_size - } - - /// Perform a virtual translation step, returning the next PTE address to read - /// - /// # Arguments - /// - /// * `pte_addr` - input PTE address that was read the last time (or DTB) - /// * `virt_addr` - virtual address we are translating - /// * `step` - the current step in the page walk - pub fn vtop_step(&self, pte_addr: Address, virt_addr: Address, step: usize) -> Address { - Address::from( - self.pte_addr_mask(pte_addr, step) | self.virt_addr_to_pte_offset(virt_addr, step), - ) - } - - /// Get the page size of a specific step without checking if such page could exist - /// - /// # Arguments - /// - /// * `step` - the current step in the page walk - pub fn page_size_step_unchecked(&self, step: usize) -> usize { - let max_index_bits = self.virtual_address_splits[step..].iter().sum::(); - (1u64 << max_index_bits) as usize - } - - /// Get the page size of a specific page walk step - /// - /// This function is preferable to use externally, because in debug builds it will check if such - /// page could exist, and if can not, it will panic - /// - /// # Arguments - /// - /// * `step` - the current step in the page walk - pub fn page_size_step(&self, step: usize) -> usize { - debug_assert!(self.valid_final_page_steps.binary_search(&step).is_ok()); - self.page_size_step_unchecked(step) - } - - /// Get the page size of a specific mapping level - /// - /// This function is the same as `page_size_step`, but the `level` almost gets inverted. It - /// goes in line with x86 page level naming. With 1 being the 4kb page, and higher meaning - /// larger page. 
- /// - /// # Arguments - /// - /// * `level` - page mapping level to get the size of (1 meaning the smallest page) - pub fn page_size_level(&self, level: usize) -> usize { - self.page_size_step(self.virtual_address_splits.len() - level) - } - - /// Get the final physical page - /// - /// This performs the final step of a successful translation - retrieve the final physical - /// address. It does not perform any present checks, and assumes `pte_addr` points to a valid - /// page. - /// - /// # Arguments - /// - /// * `pte_addr` - the address inside the previously read PTE - /// * `virt_addr` - the virtual address we are currently translating - /// * `step` - the current step in the page walk - pub fn get_phys_page( - &self, - pte_addr: Address, - virt_addr: Address, - step: usize, - ) -> PhysicalAddress { - let phys_addr = Address::from( - self.pte_addr_mask(pte_addr, step) | self.virt_addr_to_page_offset(virt_addr, step), - ); - - PhysicalAddress::with_page( - phys_addr, - PageType::default() - .write(pte_addr.bit_at(self.writeable_bit)) - .noexec(pte_addr.bit_at(self.nx_bit)), - self.page_size_step(step), - ) - } - - /// Check if the current page table entry is valid - /// - /// # Arguments - /// - /// * `pte_addr` - current page table entry - /// * `step` - the current step in the page walk - pub fn check_entry(&self, pte_addr: Address, step: usize) -> bool { - step == 0 || pte_addr.bit_at(self.present_bit) - } - - /// Check if the current page table entry contains a physical page - /// - /// This will check `valid_final_page_steps` to determine if the PTE could have a large page, - /// and then check the large page bit for confirmation. It will always return true on the final - /// mapping regarding of the values in `valid_final_page_steps`. The `valid_final_page_steps` - /// list has to be sorted for the function to work properly, because it uses binary search. 
- /// - /// # Arguments - /// - /// * `pte_addr` - current page table entry - /// * `step` - the current step the page walk - pub fn is_final_mapping(&self, pte_addr: Address, step: usize) -> bool { - (step == self.virtual_address_splits.len() - 1) - || (pte_addr.bit_at(self.large_page_bit) - && self.valid_final_page_steps.binary_search(&step).is_ok()) - } - - /// This function will do a virtual to physical memory translation for the `ArchMMUSpec` in - /// `MMUTranslationBase` scope, over multiple elements. - pub(crate) fn virt_to_phys_iter( - &self, - mem: &mut T, - dtb: D, - addrs: VI, - out: &mut VO, - out_fail: &mut FO, - arena: &Bump, - ) where - T: PhysicalMemory + ?Sized, - B: SplitAtIndex, - D: MMUTranslationBase, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, - { - vtop_trace!("virt_to_phys_iter_with_mmu"); - - let mut data_to_translate = BumpVec::new_in(arena); - let mut data_pt_read: BumpVec = BumpVec::new_in(arena); - let mut data_pt_buf = BumpVec::new_in(arena); - let mut data_to_translate_map = BTreeMap::new_in(BumpVec::new_in(arena)); - - //TODO: Calculate and reserve enough data in the data_to_translate vectors - //TODO: precalc vtop_step bit split sum / transform the splits to a lookup table - //TODO: Improve filtering speed (vec reserve) - //TODO: Optimize BTreeMap - - data_to_translate - .extend((0..dtb.pt_count()).map(|idx| { - TranslationChunk::new(dtb.get_pt_by_index(idx), BumpVec::new_in(arena)) - })); - - addrs.for_each(|data| dtb.virt_addr_filter(self, data, &mut data_to_translate, out_fail)); - - data_to_translate - .iter_mut() - .for_each(|trd| trd.recalc_minmax()); - - for pt_step in 0..self.split_count() { - vtop_trace!( - "pt_step = {}, data_to_translate.len() = {:x}", - pt_step, - data_to_translate.len() - ); - - let next_page_size = self.page_size_step_unchecked(pt_step + 1); - - vtop_trace!("next_page_size = {:x}", next_page_size); - - //Loop through the data in reverse order to allow the data 
buffer grow on the back when - //memory regions are split - for i in (0..data_to_translate.len()).rev() { - let tr_chunk = data_to_translate.swap_remove(i); - vtop_trace!( - "checking pt_addr={:x}, elems={:x}", - tr_chunk.pt_addr, - tr_chunk.vec.len() - ); - - if !self.check_entry(tr_chunk.pt_addr, pt_step) { - //There has been an error in translation, push it to output with the associated buf - vtop_trace!("check_entry failed"); - out_fail.extend( - tr_chunk - .vec - .into_iter() - .map(|entry| (Error::VirtualTranslate, entry.addr, entry.buf)), - ); - } else if self.is_final_mapping(tr_chunk.pt_addr, pt_step) { - //We reached an actual page. The translation was successful - vtop_trace!("found final mapping: {:x}", tr_chunk.pt_addr); - let pt_addr = tr_chunk.pt_addr; - out.extend(tr_chunk.vec.into_iter().map(|entry| { - (self.get_phys_page(pt_addr, entry.addr, pt_step), entry.buf) - })); - } else { - //We still need to continue the page walk - - let min_addr = tr_chunk.min_addr(); - - //As an optimization, divide and conquer the input memory regions. - //VTOP speedup is insane. Visible in large sequential or chunked reads. 
- for (_, (_, mut chunk)) in - (arena, tr_chunk).page_chunks(min_addr, next_page_size) - { - let pt_addr = self.vtop_step(chunk.pt_addr, chunk.min_addr(), pt_step); - chunk.pt_addr = pt_addr; - data_to_translate.push(chunk); - } - } - } - - if data_to_translate.is_empty() { - break; - } - - if let Err(err) = self.read_pt_address_iter( - mem, - pt_step, - &mut data_to_translate_map, - &mut data_to_translate, - &mut data_pt_buf, - &mut data_pt_read, - out_fail, - ) { - vtop_trace!("read_pt_address_iter failure: {}", err); - out_fail.extend( - data_to_translate - .into_iter() - .flat_map(|chunk| chunk.vec.into_iter()) - .map(|data| (err, data.addr, data.buf)), - ); - return; - } - } - - debug_assert!(data_to_translate.is_empty()); - } - - //TODO: Clean this up to have less args - #[allow(clippy::too_many_arguments)] - fn read_pt_address_iter<'a, T, B, V, FO>( - &self, - mem: &mut T, - step: usize, - addr_map: &mut BTreeMap, - addrs: &mut TranslateVec<'a, B>, - pt_buf: &mut BumpVec, - pt_read: &mut BumpVec, - err_out: &mut FO, - ) -> Result<()> - where - T: PhysicalMemory + ?Sized, - FO: Extend<(Error, Address, B)>, - V: Vector>, - B: SplitAtIndex, - { - //TODO: use self.pt_leaf_size(step) (need to handle LittleEndian::read_u64) - let pte_size = 8; - let page_size = self.pt_leaf_size(step); - - //pt_buf.clear(); - pt_buf.resize(pte_size * addrs.len(), 0); - - debug_assert!(pt_read.is_empty()); - - //This is safe, because pt_read gets cleared at the end of the function - let pt_read: &mut BumpVec = unsafe { std::mem::transmute(pt_read) }; - - for (chunk, tr_chunk) in pt_buf.chunks_exact_mut(pte_size).zip(addrs.iter()) { - pt_read.push(PhysicalReadData( - PhysicalAddress::with_page(tr_chunk.pt_addr, PageType::PAGE_TABLE, page_size), - chunk, - )); - } - - mem.phys_read_raw_list(pt_read)?; - - //Filter out duplicate reads - //Ideally, we would want to append all duplicates to the existing list, but they would mostly - //only occur, in strange kernel side situations when 
building the page map, - //and having such handling may end up highly inefficient (due to having to use map, and remapping it) - addr_map.clear(); - - //Okay, so this is extremely useful in one element reads. - //We kind of have a local on-stack cache to check against - //before a) checking in the set, and b) pushing to the set - let mut prev_addr: Option
= None; - - for i in (0..addrs.len()).rev() { - let mut chunk = addrs.swap_remove(i); - let PhysicalReadData(_, buf) = pt_read.swap_remove(i); - let pt_addr = Address::from(u64::from_le_bytes(buf[0..8].try_into().unwrap())); - - if self.pte_addr_mask(chunk.pt_addr, step) != self.pte_addr_mask(pt_addr, step) - && (prev_addr.is_none() - || (prev_addr.unwrap() != pt_addr && !addr_map.contains_key(&pt_addr))) - { - chunk.pt_addr = pt_addr; - - if let Some(pa) = prev_addr { - addr_map.insert(pa, ()); - } - - prev_addr = Some(pt_addr); - addrs.push(chunk); - continue; - } - - err_out.extend( - chunk - .vec - .into_iter() - .map(|entry| (Error::VirtualTranslate, entry.addr, entry.buf)), - ); - } - - pt_read.clear(); - - Ok(()) - } -} diff --git a/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec/translate_data.rs b/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec/translate_data.rs deleted file mode 100644 index 4a2df18..0000000 --- a/apex_dma/memflow_lib/memflow/src/architecture/mmu_spec/translate_data.rs +++ /dev/null @@ -1,245 +0,0 @@ -use crate::iter::SplitAtIndex; -use crate::types::Address; -use bumpalo::{collections::Vec as BumpVec, Bump}; -use std::cmp::Ordering; - -pub type TranslateVec<'a, T> = BumpVec<'a, TranslationChunk<'a, T>>; - -pub struct TranslateData { - pub addr: Address, - pub buf: T, -} - -impl Ord for TranslateData { - fn cmp(&self, other: &Self) -> Ordering { - self.addr.cmp(&other.addr) - } -} - -impl Eq for TranslateData {} - -impl PartialOrd for TranslateData { - fn partial_cmp(&self, other: &Self) -> Option { - self.addr.partial_cmp(&other.addr) - } -} - -impl PartialEq for TranslateData { - fn eq(&self, other: &Self) -> bool { - self.addr == other.addr - } -} - -impl SplitAtIndex for TranslateData { - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) - where - Self: Sized, - { - let addr = self.addr; - - let (bleft, bright) = self.buf.split_inclusive_at(idx); - let bl_len = bleft.length(); - - ( - TranslateData { addr, 
buf: bleft }, - bright.map(|buf| TranslateData { - buf, - addr: addr + bl_len, - }), - ) - } - - fn split_at(&mut self, idx: usize) -> (Self, Option) - where - Self: Sized, - { - let addr = self.addr; - let (bleft, bright) = self.buf.split_at(idx); - let bl_len = bleft.length(); - - ( - TranslateData { addr, buf: bleft }, - bright.map(|buf| TranslateData { - buf, - addr: addr + bl_len, - }), - ) - } - - fn length(&self) -> usize { - self.buf.length() - } - - fn size_hint(&self) -> usize { - self.buf.size_hint() - } -} - -/// Abstracts away a list of TranslateData in a splittable manner -pub struct TranslationChunk<'a, T> { - pub pt_addr: Address, - pub vec: BumpVec<'a, TranslateData>, - min_addr: Address, - max_addr: Address, -} - -impl<'a, T> TranslationChunk<'a, T> { - pub fn min_addr(&self) -> Address { - self.min_addr - } - - pub fn max_addr(&self) -> Address { - self.max_addr - } -} - -impl<'a, T: SplitAtIndex> TranslationChunk<'a, T> { - pub fn new(pt_addr: Address, vec: BumpVec<'a, TranslateData>) -> Self { - let (min, max) = vec.iter().fold((!0u64, 0u64), |(cmin, cmax), elem| { - ( - std::cmp::min(cmin, elem.addr.as_u64()), - std::cmp::max(cmax, elem.addr.as_u64() + elem.length() as u64), - ) - }); - - Self::with_minmax(pt_addr, vec, min.into(), max.into()) //std::cmp::max(min, max).into()) - } - - pub fn with_minmax( - pt_addr: Address, - vec: BumpVec<'a, TranslateData>, - min_addr: Address, - max_addr: Address, - ) -> Self { - Self { - pt_addr, - vec, - min_addr, - max_addr, - } - } - - pub fn recalc_minmax(&mut self) { - let (min, max) = self.vec.iter().fold((!0u64, 0u64), |(cmin, cmax), elem| { - ( - std::cmp::min(cmin, elem.addr.as_u64()), - std::cmp::max(cmax, elem.addr.as_u64() + elem.length() as u64), - ) - }); - - self.min_addr = min.into(); - self.max_addr = max.into(); - } - - pub fn consume_mut(&mut self, arena: &'a Bump) -> Self { - let pt_addr = std::mem::replace(&mut self.pt_addr, Address::null()); - let vec = std::mem::replace(&mut self.vec, 
BumpVec::new_in(arena)); - let min_addr = std::mem::replace(&mut self.min_addr, Address::invalid()); - let max_addr = std::mem::replace(&mut self.max_addr, Address::null()); - - Self { - pt_addr, - vec, - min_addr, - max_addr, - } - } - - pub fn merge_with(&mut self, mut other: Self) { - //if other has a vec with larger capacity, then first swap them - if self.vec.capacity() < other.vec.capacity() { - std::mem::swap(self, &mut other); - } - - self.vec.extend(other.vec.into_iter()); - - self.min_addr = std::cmp::min(self.min_addr, other.min_addr); - self.max_addr = std::cmp::max(self.max_addr, other.max_addr); - } - - pub fn split_at_inclusive(mut self, idx: usize, arena: &'a Bump) -> (Self, Option) { - let len = self.max_addr - self.min_addr; - - if len <= idx { - (self, None) - } else { - let mut vec_right = BumpVec::new_in(arena); - let min_addr = self.min_addr; - let end_addr = min_addr + std::cmp::min(len - 1, idx); - let pt_addr = self.pt_addr; - - let mut left_min = Address::invalid(); - let mut left_max = Address::null(); - - let mut right_min = Address::invalid(); - let mut right_max = Address::null(); - - for i in (0..self.vec.len()).rev() { - let data = self.vec.get_mut(i).unwrap(); - if data.addr <= end_addr { - let idx = end_addr - data.addr; - //Need to remove empty ones - let (left, right) = data.split_inclusive_at(idx); - if left.length() > 0 { - left_min = std::cmp::min(left_min, left.addr); - left_max = std::cmp::max(left_max, left.addr + left.length()); - *data = left; - } else { - self.vec.swap_remove(i); - } - if let Some(right) = right { - right_min = std::cmp::min(right_min, right.addr); - right_max = std::cmp::max(right_max, right.addr + right.length()); - vec_right.push(right); - } - } else { - right_min = std::cmp::min(right_min, data.addr); - right_max = std::cmp::max(right_max, data.addr + data.length()); - vec_right.push(self.vec.swap_remove(i)); - } - } - - self.min_addr = left_min; - self.max_addr = left_max; - - if 
vec_right.is_empty() { - (self, None) - } else { - ( - self, - Some(TranslationChunk::with_minmax( - pt_addr, vec_right, right_min, right_max, - )), - ) - } - } - } -} - -impl<'a, T: SplitAtIndex> SplitAtIndex for (&'a Bump, TranslationChunk<'a, T>) { - fn split_at(&mut self, idx: usize) -> (Self, Option) { - if idx == 0 { - let chunk = self.1.consume_mut(self.0); - ((self.0, self.1.consume_mut(self.0)), Some((self.0, chunk))) - } else { - self.split_inclusive_at(idx - 1) - } - } - - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) { - let chunk = self.1.consume_mut(self.0); - let (left, right) = chunk.split_at_inclusive(idx, self.0); - ((self.0, left), right.map(|x| (self.0, x))) - } - - fn unsplit(&mut self, left: Self, right: Option) { - self.1.merge_with(left.1); - if let Some(chunk) = right { - self.1.merge_with(chunk.1); - } - } - - fn length(&self) -> usize { - self.1.max_addr() - self.1.min_addr() - } -} diff --git a/apex_dma/memflow_lib/memflow/src/architecture/mod.rs b/apex_dma/memflow_lib/memflow/src/architecture/mod.rs index 1102ffa..cc75f49 100644 --- a/apex_dma/memflow_lib/memflow/src/architecture/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/architecture/mod.rs @@ -5,7 +5,7 @@ Each architecture implements the `Architecture` trait and all function calls are dispatched into their own architecture specific sub-modules. -Virtual address translations are done using `ScopedVirtualTranslate` +Virtual address translations are done using `VirtualTranslate3` trait, which is linked to a particular architecture. Each architecture also has a `ByteOrder` assigned to it. @@ -13,18 +13,10 @@ When reading/writing data from/to the target it is necessary that memflow know the proper byte order of the target system. 
*/ +pub mod arm; pub mod x86; -mod mmu_spec; - -pub use mmu_spec::ArchMMUSpec; - -use crate::error::{Error, Result}; -use crate::iter::{FnExtend, SplitAtIndex}; -use crate::mem::PhysicalMemory; - -use crate::types::{Address, PhysicalAddress}; -pub use bumpalo::{collections::Vec as BumpVec, Bump}; +use crate::types::size; /// Identifies the byte order of a architecture /// @@ -32,9 +24,10 @@ pub use bumpalo::{collections::Vec as BumpVec, Bump}; /// The memory will be automatically converted to the endianess memflow is currently running on. /// /// See the [wikipedia article](https://en.wikipedia.org/wiki/Endianness) for more information on the subject. +#[repr(u8)] #[derive(Debug, Clone, Copy, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Serialize))] -#[repr(u8)] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] pub enum Endianess { /// Little Endianess LittleEndian, @@ -42,56 +35,6 @@ pub enum Endianess { BigEndian, } -/// Translates virtual memory to physical using internal translation base (usually a process' dtb) -/// -/// This trait abstracts virtual address translation for a single virtual memory scope. -/// On x86 architectures, it is a single `Address` - a CR3 register. But other architectures may -/// use multiple translation bases, or use a completely different translation mechanism (MIPS). 
-pub trait ScopedVirtualTranslate: Clone + Copy + Send { - fn virt_to_phys( - &self, - mem: &mut T, - addr: Address, - ) -> Result { - let arena = Bump::new(); - let mut output = None; - let mut success = FnExtend::new(|elem: (PhysicalAddress, _)| { - if output.is_none() { - output = Some(elem.0); - } - }); - let mut output_err = None; - let mut fail = FnExtend::new(|elem: (Error, _, _)| output_err = Some(elem.0)); - self.virt_to_phys_iter( - mem, - Some((addr, 1)).into_iter(), - &mut success, - &mut fail, - &arena, - ); - output.map(Ok).unwrap_or_else(|| Err(output_err.unwrap())) - } - - fn virt_to_phys_iter< - T: PhysicalMemory + ?Sized, - B: SplitAtIndex, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, - >( - &self, - mem: &mut T, - addrs: VI, - out: &mut VO, - out_fail: &mut FO, - arena: &Bump, - ); - - fn translation_table_id(&self, address: Address) -> usize; - - fn arch(&self) -> ArchitectureObj; -} - pub trait Architecture: Send + Sync + 'static { /// Returns the number of bits of a pointers width on a `Architecture`. /// Currently this will either return 64 or 32 depending on the pointer width of the target. @@ -143,6 +86,10 @@ pub trait Architecture: Send + Sync + 'static { /// This function will return the pointer width as a `usize` value. /// See `Architecture::bits()` for more information. /// + /// # Remarks + /// + /// The pointer width will never overflow a `usize` value. 
+ /// /// # Examples /// /// ``` @@ -165,6 +112,9 @@ pub trait Architecture: Send + Sync + 'static { /// /// ``` fn address_space_bits(&self) -> u8; + + /// Returns a FFI-safe identifier + fn ident(&self) -> ArchitectureIdent; } impl std::fmt::Debug for ArchitectureObj { @@ -190,6 +140,61 @@ impl std::cmp::PartialEq for ArchitectureObj { } } +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub enum ArchitectureIdent { + /// Unknown architecture. Could be third-party implemented. memflow knows how to work on them, + /// but is unable to instantiate them. + Unknown(usize), + /// X86 with specified bitness and address extensions + /// + /// First argument - `bitness` controls whether it's 32, or 64 bit variant. + /// Second argument - `address_extensions` control whether address extensions are + /// enabled (PAE on x32, or LA57 on x64). Warning: LA57 is currently unsupported. + X86(u8, bool), + /// Arm 64-bit architecture with specified page size + /// + /// Valid page sizes are 4kb, 16kb, 64kb. 
Only 4kb is supported at the moment + AArch64(usize), +} + +impl std::fmt::Display for ArchitectureIdent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ArchitectureIdent::X86(32, false) => f.pad("x86_32"), + ArchitectureIdent::X86(32, true) => f.pad("x86_32 PAE"), + ArchitectureIdent::X86(64, false) => f.pad("x86_64"), + ArchitectureIdent::X86(64, true) => f.pad("x86_64 LA57"), + ArchitectureIdent::X86(_, _) => f.pad("x86"), + ArchitectureIdent::AArch64(_) => f.pad("AArch64"), + ArchitectureIdent::Unknown(id) => f.debug_tuple("Unknown").field(&id).finish(), + } + } +} + +impl ArchitectureIdent { + pub fn into_obj(self) -> ArchitectureObj { + self.into() + } +} + +impl From for ArchitectureObj { + fn from(arch: ArchitectureIdent) -> ArchitectureObj { + const KB4: usize = size::kb(4); + const KB16: usize = size::kb(16); + match arch { + ArchitectureIdent::X86(32, false) => x86::x32::ARCH, + ArchitectureIdent::X86(32, true) => x86::x32_pae::ARCH, + ArchitectureIdent::X86(64, false) => x86::x64::ARCH, + ArchitectureIdent::AArch64(KB4) => arm::aarch64::ARCH, + ArchitectureIdent::AArch64(KB16) => arm::aarch64::ARCH_16K, + _ => panic!("unsupported architecture! 
{:?}", arch), + } + } +} + #[cfg(feature = "serde")] impl serde::Serialize for ArchitectureObj { fn serialize(&self, serializer: S) -> core::result::Result diff --git a/apex_dma/memflow_lib/memflow/src/architecture/x86/mod.rs b/apex_dma/memflow_lib/memflow/src/architecture/x86/mod.rs index 2cc8878..438de79 100644 --- a/apex_dma/memflow_lib/memflow/src/architecture/x86/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/architecture/x86/mod.rs @@ -2,24 +2,25 @@ pub mod x32; pub mod x32_pae; pub mod x64; -use super::{ - mmu_spec::{translate_data::TranslateVec, ArchMMUSpec, MMUTranslationBase}, - Architecture, ArchitectureObj, Endianess, ScopedVirtualTranslate, +use super::{Architecture, ArchitectureIdent, ArchitectureObj, Endianess}; + +use crate::mem::virt_translate::{ + mmu::ArchMmuSpec, VirtualTranslate3, VtopFailureCallback, VtopOutputCallback, }; -use super::Bump; -use crate::error::{Error, Result}; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; use crate::iter::SplitAtIndex; use crate::mem::PhysicalMemory; -use crate::types::{Address, PhysicalAddress}; +use crate::types::{umem, Address}; +use cglue::tuple::*; + +use std::ptr; pub struct X86Architecture { /// Defines how many bits does the native word size have bits: u8, - /// Defines the byte order of the architecture - endianess: Endianess, /// Defines the underlying MMU used for address translation - mmu: ArchMMUSpec, + mmu: ArchMmuSpec, } impl Architecture for X86Architecture { @@ -28,59 +29,61 @@ impl Architecture for X86Architecture { } fn endianess(&self) -> Endianess { - self.endianess + self.mmu.def.endianess } fn page_size(&self) -> usize { - self.mmu.page_size_level(1) + self.mmu.page_size_level(1) as usize } fn size_addr(&self) -> usize { - self.mmu.addr_size.into() + self.mmu.def.addr_size.into() } fn address_space_bits(&self) -> u8 { - self.mmu.address_space_bits + self.mmu.def.address_space_bits + } + + fn ident(&self) -> ArchitectureIdent { + ArchitectureIdent::X86( + self.bits, + 
ptr::eq(self as *const _, &x32_pae::ARCH_SPEC as *const _), + ) } } #[derive(Clone, Copy)] -pub struct X86ScopedVirtualTranslate { +pub struct X86VirtualTranslate { arch: &'static X86Architecture, - dtb: X86PageTableBase, + dtb: Address, } -impl X86ScopedVirtualTranslate { +impl X86VirtualTranslate { pub fn new(arch: &'static X86Architecture, dtb: Address) -> Self { - Self { - arch, - dtb: X86PageTableBase(dtb), - } + Self { arch, dtb } } } -impl ScopedVirtualTranslate for X86ScopedVirtualTranslate { +impl VirtualTranslate3 for X86VirtualTranslate { fn virt_to_phys_iter< T: PhysicalMemory + ?Sized, B: SplitAtIndex, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, + VI: Iterator>, >( &self, mem: &mut T, addrs: VI, - out: &mut VO, - out_fail: &mut FO, - arena: &Bump, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + tmp_buf: &mut [std::mem::MaybeUninit], ) { self.arch .mmu - .virt_to_phys_iter(mem, self.dtb, addrs, out, out_fail, arena) + .virt_to_phys_iter(mem, self.dtb, addrs, out, out_fail, tmp_buf) } - fn translation_table_id(&self, _address: Address) -> usize { - self.dtb.0.as_u64().overflowing_shr(12).0 as usize + fn translation_table_id(&self, _address: Address) -> umem { + self.dtb.to_umem().overflowing_shr(12).0 } fn arch(&self) -> ArchitectureObj { @@ -88,37 +91,6 @@ impl ScopedVirtualTranslate for X86ScopedVirtualTranslate { } } -#[repr(transparent)] -#[derive(Clone, Copy)] -pub struct X86PageTableBase(Address); - -impl MMUTranslationBase for X86PageTableBase { - fn get_initial_pt(&self, _: Address) -> Address { - self.0 - } - - fn get_pt_by_index(&self, _: usize) -> Address { - self.0 - } - - fn pt_count(&self) -> usize { - 1 - } - - fn virt_addr_filter( - &self, - spec: &ArchMMUSpec, - addr: (Address, B), - data_to_translate: &mut TranslateVec, - out_fail: &mut O, - ) where - B: SplitAtIndex, - O: Extend<(Error, Address, B)>, - { - spec.virt_addr_filter(addr, &mut data_to_translate[0].vec, 
out_fail); - } -} - // This lint doesn't make any sense in our usecase, since we nevel leak ARCH_SPECs, and ARCH is // a static trait object with a consistent address. fn underlying_arch(arch: ArchitectureObj) -> Option<&'static X86Architecture> { @@ -133,9 +105,10 @@ fn underlying_arch(arch: ArchitectureObj) -> Option<&'static X86Architecture> { } } -pub fn new_translator(dtb: Address, arch: ArchitectureObj) -> Result { - let arch = underlying_arch(arch).ok_or(Error::InvalidArchitecture)?; - Ok(X86ScopedVirtualTranslate::new(arch, dtb)) +pub fn new_translator(dtb: Address, arch: ArchitectureObj) -> Result { + let arch = + underlying_arch(arch).ok_or(Error(ErrorOrigin::Mmu, ErrorKind::InvalidArchitecture))?; + Ok(X86VirtualTranslate::new(arch, dtb)) } pub fn is_x86_arch(arch: ArchitectureObj) -> bool { diff --git a/apex_dma/memflow_lib/memflow/src/architecture/x86/x32.rs b/apex_dma/memflow_lib/memflow/src/architecture/x86/x32.rs index 89aa034..f0950cf 100644 --- a/apex_dma/memflow_lib/memflow/src/architecture/x86/x32.rs +++ b/apex_dma/memflow_lib/memflow/src/architecture/x86/x32.rs @@ -1,40 +1,43 @@ use super::{ - super::{ArchMMUSpec, ArchitectureObj, Endianess, ScopedVirtualTranslate}, - X86Architecture, X86ScopedVirtualTranslate, + super::{ArchitectureObj, Endianess}, + X86Architecture, X86VirtualTranslate, }; +use crate::mem::virt_translate::mmu::ArchMmuDef; + use crate::types::Address; -pub(super) const ARCH_SPEC: X86Architecture = X86Architecture { +pub(super) static ARCH_SPEC: X86Architecture = X86Architecture { bits: 32, - endianess: Endianess::LittleEndian, - mmu: ArchMMUSpec { + mmu: ArchMmuDef { virtual_address_splits: &[10, 10, 12], valid_final_page_steps: &[1, 2], address_space_bits: 32, + endianess: Endianess::LittleEndian, addr_size: 4, pte_size: 4, - present_bit: 0, - writeable_bit: 1, - nx_bit: 31, //Actually, NX is unsupported in x86 non-PAE, we have to do something about it - large_page_bit: 7, - }, + present_bit: |a| a.bit_at(0), + writeable_bit: 
|a, pb| pb || a.bit_at(1), + nx_bit: |_, _| false, + large_page_bit: |a| a.bit_at(7), + } + .into_spec(), }; pub static ARCH: ArchitectureObj = &ARCH_SPEC; -pub fn new_translator(dtb: Address) -> impl ScopedVirtualTranslate { - X86ScopedVirtualTranslate::new(&ARCH_SPEC, dtb) +pub fn new_translator(dtb: Address) -> X86VirtualTranslate { + X86VirtualTranslate::new(&ARCH_SPEC, dtb) } //x64 tests MMU rigorously, here we will only test a few special cases #[cfg(test)] mod tests { - use crate::architecture::mmu_spec::ArchMMUSpec; - use crate::types::{size, Address}; + use crate::mem::virt_translate::mmu::ArchMmuSpec; + use crate::types::{mem, size, Address}; - fn get_mmu_spec() -> ArchMMUSpec { - super::ARCH_SPEC.mmu + fn get_mmu_spec() -> &'static ArchMmuSpec { + &super::ARCH_SPEC.mmu } #[test] @@ -43,15 +46,15 @@ mod tests { let mask_addr = Address::invalid(); assert_eq!( mmu.pte_addr_mask(mask_addr, 0), - Address::bit_mask(12..31).as_u64() + Address::bit_mask(12..=31).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 1), - Address::bit_mask(12..31).as_u64() + Address::bit_mask(12..=31).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 2), - Address::bit_mask(12..31).as_u64() + Address::bit_mask(12..=31).to_umem() ); } @@ -65,7 +68,7 @@ mod tests { #[test] fn x86_page_size_level() { let mmu = get_mmu_spec(); - assert_eq!(mmu.page_size_level(1), size::kb(4)); - assert_eq!(mmu.page_size_level(2), size::mb(4)); + assert_eq!(mmu.page_size_level(1), mem::kb(4)); + assert_eq!(mmu.page_size_level(2), mem::mb(4)); } } diff --git a/apex_dma/memflow_lib/memflow/src/architecture/x86/x32_pae.rs b/apex_dma/memflow_lib/memflow/src/architecture/x86/x32_pae.rs index d0f971b..99e37a7 100644 --- a/apex_dma/memflow_lib/memflow/src/architecture/x86/x32_pae.rs +++ b/apex_dma/memflow_lib/memflow/src/architecture/x86/x32_pae.rs @@ -1,40 +1,43 @@ use super::{ - super::{ArchMMUSpec, ArchitectureObj, Endianess, ScopedVirtualTranslate}, - X86Architecture, X86ScopedVirtualTranslate, + 
super::{ArchitectureObj, Endianess}, + X86Architecture, X86VirtualTranslate, }; +use crate::mem::virt_translate::mmu::ArchMmuDef; + use crate::types::Address; -pub(super) const ARCH_SPEC: X86Architecture = X86Architecture { +pub(super) static ARCH_SPEC: X86Architecture = X86Architecture { bits: 32, - endianess: Endianess::LittleEndian, - mmu: ArchMMUSpec { + mmu: ArchMmuDef { virtual_address_splits: &[2, 9, 9, 12], valid_final_page_steps: &[2, 3], address_space_bits: 36, + endianess: Endianess::LittleEndian, addr_size: 4, pte_size: 8, - present_bit: 0, - writeable_bit: 1, - nx_bit: 63, - large_page_bit: 7, - }, + present_bit: |a| a.bit_at(0), + writeable_bit: |a, pb| pb || a.bit_at(1), + nx_bit: |a, pb| pb || a.bit_at(63), + large_page_bit: |a| a.bit_at(7), + } + .into_spec(), }; pub static ARCH: ArchitectureObj = &ARCH_SPEC; -pub fn new_translator(dtb: Address) -> impl ScopedVirtualTranslate { - X86ScopedVirtualTranslate::new(&ARCH_SPEC, dtb) +pub fn new_translator(dtb: Address) -> X86VirtualTranslate { + X86VirtualTranslate::new(&ARCH_SPEC, dtb) } //x64 tests MMU rigorously, here we will only test a few special cases #[cfg(test)] mod tests { - use crate::architecture::mmu_spec::ArchMMUSpec; - use crate::types::{size, Address}; + use crate::mem::virt_translate::mmu::ArchMmuSpec; + use crate::types::{mem, size, Address}; - fn get_mmu_spec() -> ArchMMUSpec { - super::ARCH_SPEC.mmu + fn get_mmu_spec() -> &'static ArchMmuSpec { + &super::ARCH_SPEC.mmu } #[test] @@ -43,15 +46,15 @@ mod tests { let mask_addr = Address::invalid(); assert_eq!( mmu.pte_addr_mask(mask_addr, 0), - Address::bit_mask(5..35).as_u64() + Address::bit_mask(5..=35).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 1), - Address::bit_mask(12..35).as_u64() + Address::bit_mask(12..=35).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 2), - Address::bit_mask(12..35).as_u64() + Address::bit_mask(12..=35).to_umem() ); } @@ -65,7 +68,7 @@ mod tests { #[test] fn x86_pae_page_size_level() { let mmu 
= get_mmu_spec(); - assert_eq!(mmu.page_size_level(1), size::kb(4)); - assert_eq!(mmu.page_size_level(2), size::mb(2)); + assert_eq!(mmu.page_size_level(1), mem::kb(4)); + assert_eq!(mmu.page_size_level(2), mem::mb(2)); } } diff --git a/apex_dma/memflow_lib/memflow/src/architecture/x86/x64.rs b/apex_dma/memflow_lib/memflow/src/architecture/x86/x64.rs index bc44c85..4808956 100644 --- a/apex_dma/memflow_lib/memflow/src/architecture/x86/x64.rs +++ b/apex_dma/memflow_lib/memflow/src/architecture/x86/x64.rs @@ -1,39 +1,42 @@ use super::{ - super::{ArchMMUSpec, ArchitectureObj, Endianess, ScopedVirtualTranslate}, - X86Architecture, X86ScopedVirtualTranslate, + super::{ArchitectureObj, Endianess}, + X86Architecture, X86VirtualTranslate, }; +use crate::mem::virt_translate::mmu::ArchMmuDef; + use crate::types::Address; -pub(super) const ARCH_SPEC: X86Architecture = X86Architecture { +pub(super) static ARCH_SPEC: X86Architecture = X86Architecture { bits: 64, - endianess: Endianess::LittleEndian, - mmu: ArchMMUSpec { + mmu: ArchMmuDef { virtual_address_splits: &[9, 9, 9, 9, 12], valid_final_page_steps: &[2, 3, 4], address_space_bits: 52, + endianess: Endianess::LittleEndian, addr_size: 8, pte_size: 8, - present_bit: 0, - writeable_bit: 1, - nx_bit: 63, - large_page_bit: 7, - }, + present_bit: |a| a.bit_at(0), + writeable_bit: |a, pb| pb || a.bit_at(1), + nx_bit: |a, pb| pb || a.bit_at(63), + large_page_bit: |a| a.bit_at(7), + } + .into_spec(), }; pub static ARCH: ArchitectureObj = &ARCH_SPEC; -pub fn new_translator(dtb: Address) -> impl ScopedVirtualTranslate { - X86ScopedVirtualTranslate::new(&ARCH_SPEC, dtb) +pub fn new_translator(dtb: Address) -> X86VirtualTranslate { + X86VirtualTranslate::new(&ARCH_SPEC, dtb) } #[cfg(test)] mod tests { - use crate::architecture::mmu_spec::ArchMMUSpec; - use crate::types::{size, Address, PageType}; + use crate::mem::virt_translate::mmu::{ArchMmuSpec, FlagsType}; + use crate::types::{mem, size, umem, Address, PageType}; - fn 
get_mmu_spec() -> ArchMMUSpec { - super::ARCH_SPEC.mmu + fn get_mmu_spec() -> &'static ArchMmuSpec { + &super::ARCH_SPEC.mmu } #[test] @@ -42,19 +45,19 @@ mod tests { let mask_addr = Address::invalid(); assert_eq!( mmu.pte_addr_mask(mask_addr, 0), - Address::bit_mask(12..51).as_u64() + Address::bit_mask(12..=51).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 1), - Address::bit_mask(12..51).as_u64() + Address::bit_mask(12..=51).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 2), - Address::bit_mask(12..51).as_u64() + Address::bit_mask(12..=51).to_umem() ); assert_eq!( mmu.pte_addr_mask(mask_addr, 3), - Address::bit_mask(12..51).as_u64() + Address::bit_mask(12..=51).to_umem() ); } @@ -72,17 +75,17 @@ mod tests { #[test] fn x64_page_size_level() { let mmu = get_mmu_spec(); - assert_eq!(mmu.page_size_level(1), size::kb(4)); - assert_eq!(mmu.page_size_level(2), size::mb(2)); - assert_eq!(mmu.page_size_level(3), size::gb(1)); + assert_eq!(mmu.page_size_level(1), mem::kb(4)); + assert_eq!(mmu.page_size_level(2), mem::mb(2)); + assert_eq!(mmu.page_size_level(3), mem::gb(1)); } #[test] fn x64_page_size_step() { let mmu = get_mmu_spec(); - assert_eq!(mmu.page_size_step(2), size::gb(1)); - assert_eq!(mmu.page_size_step(3), size::mb(2)); - assert_eq!(mmu.page_size_step(4), size::kb(4)); + assert_eq!(mmu.page_size_step(2), mem::gb(1)); + assert_eq!(mmu.page_size_step(3), mem::mb(2)); + assert_eq!(mmu.page_size_step(4), mem::kb(4)); } #[test] @@ -90,7 +93,7 @@ mod tests { #[cfg(debug_assertions)] fn x64_page_size_level_4() { let mmu = get_mmu_spec(); - assert_eq!(mmu.page_size_level(4), size::gb(512)); + assert_eq!(mmu.page_size_level(4), mem::gb(512)); } #[test] @@ -98,7 +101,7 @@ mod tests { #[cfg(debug_assertions)] fn x64_page_size_level_5() { let mmu = get_mmu_spec(); - assert_eq!(mmu.page_size_level(5), size::gb(512 * 512)); + assert_eq!(mmu.page_size_level(5), mem::gb(512 * 512)); } #[test] @@ -108,11 +111,11 @@ mod tests { let virt_address = indices .iter() 
.rev() - .map(|i| *i as u64) + .map(|i| *i as umem) .enumerate() .fold(0, |state, (lvl, idx)| state | (idx << (12 + 9 * lvl))) .into(); - let pte_address = Address::from(size::kb(4 * 45)); + let pte_address = Address::from(mem::kb(4 * 45)); assert_eq!( mmu.vtop_step(pte_address, virt_address, 0), pte_address + (indices[0] * 8) @@ -134,74 +137,85 @@ mod tests { #[test] fn x64_get_phys_page() { let mmu = get_mmu_spec(); - let indices = [145_usize, 54, 64, 21]; - let page_offset = 1243_usize; + let indices: [umem; 4] = [145, 54, 64, 21]; + let page_offset: umem = 1243; let virt_address = indices .iter() .rev() - .map(|i| *i as u64) + .map(|i| *i as umem) .enumerate() - .fold(page_offset as u64, |state, (lvl, idx)| { + .fold(page_offset as umem, |state, (lvl, idx)| { state | (idx << (12 + 9 * lvl)) }) .into(); - let pte_address = Address::from(size::gb(57)); + let pte_address = Address::from(mem::gb(57)); + let prev_flags = FlagsType::NONE; assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 4).page_type(), + mmu.get_phys_page(pte_address, virt_address, 4, prev_flags) + .page_type(), PageType::READ_ONLY ); assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 4).page_size(), - size::kb(4) + mmu.get_phys_page(pte_address, virt_address, 4, prev_flags) + .page_size(), + mem::kb(4) ); assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 2).page_base(), + mmu.get_phys_page(pte_address, virt_address, 2, prev_flags) + .page_base(), pte_address ); assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 4).address(), + mmu.get_phys_page(pte_address, virt_address, 4, prev_flags) + .address(), pte_address + page_offset ); assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 3).address(), - pte_address + size::kb(4 * indices[3]) + page_offset + mmu.get_phys_page(pte_address, virt_address, 3, prev_flags) + .address(), + pte_address + mem::kb(4 * indices[3]) + page_offset ); assert_eq!( - mmu.get_phys_page(pte_address, virt_address, 2).address(), - 
pte_address + size::mb(2 * indices[2]) + size::kb(4 * indices[3]) + page_offset + mmu.get_phys_page(pte_address, virt_address, 2, prev_flags) + .address(), + pte_address + mem::mb(2 * indices[2]) + mem::kb(4 * indices[3]) + page_offset ); } #[test] fn x64_check_entry() { let mmu = get_mmu_spec(); + let pte_address = 1.into(); - assert_eq!(mmu.check_entry(pte_address, 0), true); - assert_eq!(mmu.check_entry(pte_address, 1), true); - assert_eq!(mmu.check_entry(pte_address, 2), true); - assert_eq!(mmu.check_entry(pte_address, 3), true); - assert_eq!(mmu.check_entry(pte_address, 4), true); - let pte_address = 0.into(); - assert_eq!(mmu.check_entry(pte_address, 0), true); - assert_eq!(mmu.check_entry(pte_address, 3), false); + assert!(mmu.check_entry(pte_address, 0)); + assert!(mmu.check_entry(pte_address, 1)); + assert!(mmu.check_entry(pte_address, 2)); + assert!(mmu.check_entry(pte_address, 3)); + assert!(mmu.check_entry(pte_address, 4)); + + let pte_address = Address::null(); + assert!(mmu.check_entry(pte_address, 0)); + assert!(!mmu.check_entry(pte_address, 3)); } #[test] fn x64_is_final_mapping() { let mmu = get_mmu_spec(); + let pte_address = (1 << 7).into(); - assert_eq!(mmu.is_final_mapping(pte_address, 0), false); - assert_eq!(mmu.is_final_mapping(pte_address, 1), false); - assert_eq!(mmu.is_final_mapping(pte_address, 2), true); - assert_eq!(mmu.is_final_mapping(pte_address, 3), true); - assert_eq!(mmu.is_final_mapping(pte_address, 4), true); - let pte_address = 0.into(); - assert_eq!(mmu.is_final_mapping(pte_address, 0), false); - assert_eq!(mmu.is_final_mapping(pte_address, 1), false); - assert_eq!(mmu.is_final_mapping(pte_address, 2), false); - assert_eq!(mmu.is_final_mapping(pte_address, 3), false); - assert_eq!(mmu.is_final_mapping(pte_address, 4), true); + assert!(!mmu.is_final_mapping(pte_address, 0)); + assert!(!mmu.is_final_mapping(pte_address, 1)); + assert!(mmu.is_final_mapping(pte_address, 2)); + assert!(mmu.is_final_mapping(pte_address, 3)); + 
assert!(mmu.is_final_mapping(pte_address, 4)); + + let pte_address = Address::null(); + assert!(!mmu.is_final_mapping(pte_address, 0)); + assert!(!mmu.is_final_mapping(pte_address, 1)); + assert!(!mmu.is_final_mapping(pte_address, 2)); + assert!(!mmu.is_final_mapping(pte_address, 3)); + assert!(mmu.is_final_mapping(pte_address, 4)); } } diff --git a/apex_dma/memflow_lib/memflow/src/connector/args.rs b/apex_dma/memflow_lib/memflow/src/connector/args.rs deleted file mode 100644 index 4decfe4..0000000 --- a/apex_dma/memflow_lib/memflow/src/connector/args.rs +++ /dev/null @@ -1,183 +0,0 @@ -/*! -Connector argument handler. -*/ - -use std::prelude::v1::*; - -use crate::error::{Error, Result}; - -use core::convert::TryFrom; -use hashbrown::HashMap; - -/// Argument wrapper for connectors -/// -/// # Examples -/// -/// Construct from a string: -/// ``` -/// use memflow::connector::ConnectorArgs; -/// use std::convert::TryFrom; -/// -/// let argstr = "opt1=test1,opt2=test2,opt3=test3"; -/// let args = ConnectorArgs::parse(argstr).unwrap(); -/// ``` -/// -/// Construct as builder: -/// ``` -/// use memflow::connector::ConnectorArgs; -/// -/// let args = ConnectorArgs::new() -/// .insert("arg1", "test1") -/// .insert("arg2", "test2"); -/// ``` -#[derive(Debug, Clone)] -pub struct ConnectorArgs { - map: HashMap, -} - -impl ConnectorArgs { - /// Creates an empty `ConnectorArgs` struct. - pub fn new() -> Self { - Self { - map: HashMap::new(), - } - } - - /// Creates a `ConnectorArgs` struct with a default (unnamed) value. - pub fn with_default(value: &str) -> Self { - Self::new().insert("default", value) - } - - /// Tries to create a `ConnectorArgs` structure from an argument string. - /// - /// The argument string is a string of comma seperated key-value pairs. 
- /// - /// An argument string can just contain keys and values: - /// `opt1=val1,opt2=val2,opt3=val3` - /// - /// The argument string can also contain a default value as the first entry - /// which will be placed as a default argument: - /// `default_value,opt1=val1,opt2=val2` - /// - /// This function can be used to initialize a connector from user input. - pub fn parse(args: &str) -> Result { - let mut map = HashMap::new(); - - // if args != "" { - let split = args.split(','); - for (i, kv) in split.clone().enumerate() { - let kvsplit = kv.split('=').collect::>(); - if kvsplit.len() == 2 { - map.insert(kvsplit[0].to_string(), kvsplit[1].to_string()); - } else if i == 0 && kv != "" { - map.insert("default".to_string(), kv.to_string()); - } - } - // } - - Ok(Self { map }) - } - - /// Consumes self, inserts the given key-value pair and returns the self again. - /// - /// This function can be used as a builder pattern when programatically - /// configuring connectors. - /// - /// # Examples - /// - /// ``` - /// use memflow::connector::ConnectorArgs; - /// - /// let args = ConnectorArgs::new() - /// .insert("arg1", "test1") - /// .insert("arg2", "test2"); - /// ``` - pub fn insert(mut self, key: &str, value: &str) -> Self { - self.map.insert(key.to_string(), value.to_string()); - self - } - - /// Tries to retrieve an entry from the options map. - /// If the entry was not found this function returns a `None` value. - pub fn get(&self, key: &str) -> Option<&String> { - self.map.get(key) - } - - /// Tries to retrieve the default entry from the options map. - /// If the entry was not found this function returns a `None` value. - /// - /// This function is a convenience wrapper for `args.get("default")`. 
- pub fn get_default(&self) -> Option<&String> { - self.get("default") - } -} - -impl Default for ConnectorArgs { - fn default() -> Self { - ConnectorArgs::new() - } -} - -impl TryFrom<&str> for ConnectorArgs { - type Error = Error; - - fn try_from(args: &str) -> Result { - ConnectorArgs::parse(args) - } -} - -impl TryFrom for ConnectorArgs { - type Error = Error; - - fn try_from(args: String) -> Result { - ConnectorArgs::parse(&args) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - pub fn from_str() { - let argstr = "opt1=test1,opt2=test2,opt3=test3"; - let args = ConnectorArgs::parse(argstr).unwrap(); - assert_eq!(args.get("opt1").unwrap(), "test1"); - assert_eq!(args.get("opt2").unwrap(), "test2"); - assert_eq!(args.get("opt3").unwrap(), "test3"); - } - - #[test] - pub fn from_str_default() { - let argstr = "test0,opt1=test1,opt2=test2,opt3=test3"; - let args = ConnectorArgs::parse(argstr).unwrap(); - assert_eq!(args.get_default().unwrap(), "test0"); - assert_eq!(args.get("opt1").unwrap(), "test1"); - assert_eq!(args.get("opt2").unwrap(), "test2"); - assert_eq!(args.get("opt3").unwrap(), "test3"); - } - - #[test] - pub fn from_str_default2() { - let argstr = "opt1=test1,test0"; - let args = ConnectorArgs::parse(argstr).unwrap(); - assert_eq!(args.get_default(), None); - assert_eq!(args.get("opt1").unwrap(), "test1"); - } - - #[test] - pub fn builder() { - let args = ConnectorArgs::new() - .insert("arg1", "test1") - .insert("arg2", "test2"); - assert_eq!(args.get("arg1").unwrap(), "test1"); - assert_eq!(args.get("arg2").unwrap(), "test2"); - } - - #[test] - pub fn parse_empty() { - let argstr = "opt1=test1,test0"; - let args = ConnectorArgs::parse(argstr).unwrap(); - assert_eq!(args.get_default(), None); - } -} diff --git a/apex_dma/memflow_lib/memflow/src/connector/cpu_state.rs b/apex_dma/memflow_lib/memflow/src/connector/cpu_state.rs new file mode 100644 index 0000000..9ad5c8a --- /dev/null +++ 
b/apex_dma/memflow_lib/memflow/src/connector/cpu_state.rs @@ -0,0 +1,38 @@ +//! Describes optional cpu state for a connector + +use crate::cglue::*; +use crate::prelude::v1::Result; + +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +pub trait ConnectorCpuState: Send { + #[wrap_with_obj(crate::connector::cpu_state::CpuState)] + type CpuStateType<'a>: crate::connector::cpu_state::CpuState + 'a + where + Self: 'a; + #[wrap_with_group(crate::connector::cpu_state::IntoCpuState)] + type IntoCpuStateType: crate::connector::cpu_state::CpuState + 'static; + + fn cpu_state(&mut self) -> Result>; + fn into_cpu_state(self) -> Result; +} + +#[cfg(feature = "plugins")] +cglue_trait_group!(IntoCpuState, { CpuState, Clone }, {}); + +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +#[cglue_forward] +pub trait CpuState { + // TODO: + // max cpu index + // read_register(s) + // write_register(s) + // pause + // resume + // single-step + // breakpoints + + fn pause(&mut self); + fn resume(&mut self); +} diff --git a/apex_dma/memflow_lib/memflow/src/connector/fileio.rs b/apex_dma/memflow_lib/memflow/src/connector/fileio.rs index c6f5a64..cf7b31c 100644 --- a/apex_dma/memflow_lib/memflow/src/connector/fileio.rs +++ b/apex_dma/memflow_lib/memflow/src/connector/fileio.rs @@ -1,15 +1,104 @@ -/*! -Basic connector which works on file i/o operations (`Seek`, `Read`, `Write`). -*/ +//! Basic connector which works on file i/o operations (`Seek`, `Read`, `Write`). 
-use crate::error::{Error, Result}; -use crate::iter::FnExtend; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; use crate::mem::{ - MemoryMap, PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadData, PhysicalWriteData, + opt_call, MemoryMap, PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadMemOps, + PhysicalWriteMemOps, }; -use crate::types::Address; +use crate::types::{umem, Address}; -use std::io::{Read, Seek, SeekFrom, Write}; +use std::fs::File; +use std::io::{self, Read, Seek, SeekFrom, Write}; +use std::ops::{Deref, DerefMut}; + +use crate::cglue::*; + +/// File that implements Clone +/// +/// This file is meant for use with FileIoMemory when clone is needed, and possible Clone panics +/// are acceptable (they should either always, or never happen on a given platform, probably never) +pub struct CloneFile { + file: File, +} + +impl Clone for CloneFile { + /// Clone the file + /// + /// # Panics + /// + /// If file cloning fails. + fn clone(&self) -> Self { + Self { + file: self.file.try_clone().expect( + "Unable to clone file. Multiple open write handles to a single file descriptor are not supported." 
+ ), + } + } +} + +impl Deref for CloneFile { + type Target = File; + + fn deref(&self) -> &Self::Target { + &self.file + } +} + +impl DerefMut for CloneFile { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.file + } +} + +impl From for CloneFile { + fn from(file: File) -> Self { + Self { file } + } +} + +impl Read for CloneFile { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.file.read(buf) + } +} + +impl Read for &CloneFile { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + (&self.file).read(buf) + } +} + +impl Seek for CloneFile { + fn seek(&mut self, pos: SeekFrom) -> io::Result { + self.file.seek(pos) + } +} + +impl Seek for &CloneFile { + fn seek(&mut self, pos: SeekFrom) -> io::Result { + (&self.file).seek(pos) + } +} + +impl Write for CloneFile { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.file.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.file.flush() + } +} + +impl Write for &CloneFile { + fn write(&mut self, buf: &[u8]) -> io::Result { + (&self.file).write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + (&self.file).flush() + } +} /// Accesses physical memory via file i/o. 
/// @@ -17,72 +106,116 @@ use std::io::{Read, Seek, SeekFrom, Write}; /// /// # Examples /// ``` -/// use memflow::connector::FileIOMemory; +/// use memflow::connector::{CloneFile, FileIoMemory}; /// use memflow::mem::MemoryMap; /// /// use std::fs::File; /// -/// fn open(file: &File) { -/// let map = MemoryMap::new(); -/// let connector = FileIOMemory::try_with_reader(file, map); +/// fn open(file: File) { +/// let clone_file: CloneFile = file.into(); +/// let connector = FileIoMemory::new(clone_file); /// } /// ``` #[derive(Clone)] -pub struct FileIOMemory { +pub struct FileIoMemory { reader: T, - mem_map: MemoryMap<(Address, usize)>, + mem_map: MemoryMap<(Address, umem)>, } -impl FileIOMemory { - pub fn try_with_reader(reader: T, mem_map: MemoryMap<(Address, usize)>) -> Result { +impl FileIoMemory { + /// Creates a new connector with an identity mapped memory map. + pub fn new(reader: T) -> Result { + // use an identity mapped memory map + Self::with_size(reader, !0) + } + + /// Creates a new connector with an identity mapped memory map with the given `size`. + pub fn with_size(reader: T, size: umem) -> Result { + // use an identity mapped memory map + let mut mem_map = MemoryMap::new(); + mem_map.push_remap(0x0.into(), size, 0x0.into()); + + Self::with_mem_map(reader, mem_map) + } + + /// Creates a new connector with a custom memory map. 
+ pub fn with_mem_map(reader: T, mem_map: MemoryMap<(Address, umem)>) -> Result { Ok(Self { reader, mem_map }) } } -impl PhysicalMemory for FileIOMemory { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - let mut void = FnExtend::void(); - for ((file_off, _), buf) in self.mem_map.map_iter( - data.iter_mut() - .map(|PhysicalReadData(addr, buf)| (*addr, &mut **buf)), - &mut void, - ) { - self.reader - .seek(SeekFrom::Start(file_off.as_u64())) - .map_err(|_| Error::Connector("Seek failed"))?; - self.reader - .read_exact(buf) - .map_err(|_| Error::Connector("Read failed"))?; +#[allow(clippy::needless_option_as_deref)] +#[allow(clippy::collapsible_if)] +#[allow(clippy::blocks_in_if_conditions)] +impl PhysicalMemory for FileIoMemory { + fn phys_read_raw_iter(&mut self, mut data: PhysicalReadMemOps) -> Result<()> { + let mut iter = self.mem_map.map_iter(data.inp, data.out_fail); + while let Some(CTup3((file_off, _), meta_addr, mut buf)) = iter.next() { + if self + .reader + .seek(SeekFrom::Start(file_off.to_umem() as u64)) + .map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToSeekFile).log_error(err) + }) + .is_ok() + { + if self + .reader + .read_exact(&mut buf) + .map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToReadFile).log_error(err) + }) + .is_ok() + { + opt_call(data.out.as_deref_mut(), CTup2(meta_addr, buf)); + continue; + } + } + opt_call(iter.fail_out(), CTup2(meta_addr, buf)); } Ok(()) } - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - let mut void = FnExtend::void(); - for ((file_off, _), buf) in self - .mem_map - .map_iter(data.iter().copied().map(<_>::from), &mut void) - { - self.reader - .seek(SeekFrom::Start(file_off.as_u64())) - .map_err(|_| Error::Connector("Seek failed"))?; - self.reader - .write(buf) - .map_err(|_| Error::Connector("Write failed"))?; + fn phys_write_raw_iter(&mut self, mut data: PhysicalWriteMemOps) -> Result<()> { + let mut iter = 
self.mem_map.map_iter(data.inp, data.out_fail); + while let Some(CTup3((file_off, _), meta_addr, buf)) = iter.next() { + if self + .reader + .seek(SeekFrom::Start(file_off.to_umem() as u64)) + .map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToSeekFile).log_error(err) + }) + .is_ok() + { + if self + .reader + .write_all(&buf) + .map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToWriteFile).log_error(err) + }) + .is_ok() + { + opt_call(data.out.as_deref_mut(), CTup2(meta_addr, buf)); + continue; + } + } + opt_call(iter.fail_out(), CTup2(meta_addr, buf)); } Ok(()) } fn metadata(&self) -> PhysicalMemoryMetadata { PhysicalMemoryMetadata { - size: self - .mem_map - .as_ref() - .iter() - .last() - .map(|map| map.base().as_usize() + map.output().1) - .unwrap(), + max_address: self.mem_map.max_address(), + real_size: self.mem_map.real_size(), readonly: false, + ideal_batch_size: u32::MAX, } } } + +cglue_impl_group!( + FileIoMemory, + crate::plugins::ConnectorInstance, + {} +); diff --git a/apex_dma/memflow_lib/memflow/src/connector/filemap.rs b/apex_dma/memflow_lib/memflow/src/connector/filemap.rs index 68e2da8..100a327 100644 --- a/apex_dma/memflow_lib/memflow/src/connector/filemap.rs +++ b/apex_dma/memflow_lib/memflow/src/connector/filemap.rs @@ -1,53 +1,58 @@ -use crate::error::{Error, Result}; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; use crate::mem::MemoryMap; -use crate::types::Address; +use crate::types::{umem, Address}; use memmap::{Mmap, MmapMut, MmapOptions}; +use core::convert::TryInto; use std::fs::File; use std::sync::Arc; use super::mmap::MappedPhysicalMemory; #[derive(Clone)] -pub struct MMAPInfo<'a> { +pub struct MmapInfo<'a> { mem_map: MemoryMap<&'a [u8]>, _buf: Arc, } -impl<'a> AsRef> for MMAPInfo<'a> { +impl<'a> AsRef> for MmapInfo<'a> { fn as_ref(&self) -> &MemoryMap<&'a [u8]> { &self.mem_map } } -impl<'a> MMAPInfo<'a> { - pub fn try_with_filemap(file: File, map: MemoryMap<(Address, usize)>) -> Result { 
+impl<'a> MmapInfo<'a> { + pub fn try_with_filemap(file: File, map: MemoryMap<(Address, umem)>) -> Result { let file_map = unsafe { - MmapOptions::new() - .map(&file) - .map_err(|_| Error::Connector("unable to map file"))? + MmapOptions::new().map(&file).map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToMapFile).log_error(err) + })? }; Self::try_with_bufmap(file_map, map) } - pub fn try_with_bufmap(buf: Mmap, map: MemoryMap<(Address, usize)>) -> Result { + pub fn try_with_bufmap(buf: Mmap, map: MemoryMap<(Address, umem)>) -> Result { let mut new_map = MemoryMap::new(); - let buf_len = buf.as_ref().len(); + let buf_len = buf.as_ref().len() as umem; let buf_ptr = buf.as_ref().as_ptr(); for (base, (output_base, size)) in map.into_iter() { - if output_base.as_usize() >= buf_len { - return Err(Error::Connector("Memory map is out of range")); + let output_base_umem = output_base.to_umem(); + if output_base_umem >= buf_len { + return Err(Error( + ErrorOrigin::Connector, + ErrorKind::MemoryMapOutOfRange, + )); } - let output_end = std::cmp::min(output_base.as_usize() + size, buf_len); + let output_end = std::cmp::min(output_base_umem + size, buf_len); new_map.push(base, unsafe { std::slice::from_raw_parts( - buf_ptr.add(output_base.as_usize()), - output_end - output_base.as_usize(), + buf_ptr.add(output_base_umem.try_into().unwrap()), + (output_end - output_base_umem).try_into().unwrap(), ) }); } @@ -63,47 +68,51 @@ impl<'a> MMAPInfo<'a> { } } -pub type ReadMappedFilePhysicalMemory<'a> = MappedPhysicalMemory<&'a [u8], MMAPInfo<'a>>; +pub type ReadMappedFilePhysicalMemory<'a> = MappedPhysicalMemory<&'a [u8], MmapInfo<'a>>; -pub struct MMAPInfoMut<'a> { +pub struct MmapInfoMut<'a> { mem_map: MemoryMap<&'a mut [u8]>, _buf: MmapMut, } -impl<'a> AsRef> for MMAPInfoMut<'a> { +impl<'a> AsRef> for MmapInfoMut<'a> { fn as_ref(&self) -> &MemoryMap<&'a mut [u8]> { &self.mem_map } } -impl<'a> MMAPInfoMut<'a> { - pub fn try_with_filemap_mut(file: File, map: 
MemoryMap<(Address, usize)>) -> Result { +impl<'a> MmapInfoMut<'a> { + pub fn try_with_filemap_mut(file: File, map: MemoryMap<(Address, umem)>) -> Result { let file_map = unsafe { - MmapOptions::new() - .map_mut(&file) - .map_err(|_| Error::Connector("unable to map file"))? + MmapOptions::new().map_mut(&file).map_err(|err| { + Error(ErrorOrigin::Connector, ErrorKind::UnableToMapFile).log_error(err) + })? }; Self::try_with_bufmap_mut(file_map, map) } - pub fn try_with_bufmap_mut(mut buf: MmapMut, map: MemoryMap<(Address, usize)>) -> Result { + pub fn try_with_bufmap_mut(mut buf: MmapMut, map: MemoryMap<(Address, umem)>) -> Result { let mut new_map = MemoryMap::new(); - let buf_len = buf.as_ref().len(); + let buf_len = buf.as_ref().len() as umem; let buf_ptr = buf.as_mut().as_mut_ptr(); for (base, (output_base, size)) in map.into_iter() { - if output_base.as_usize() >= buf_len { - return Err(Error::Connector("Memory map is out of range")); + let output_base_umem = output_base.to_umem(); + if output_base_umem >= buf_len as umem { + return Err(Error( + ErrorOrigin::Connector, + ErrorKind::MemoryMapOutOfRange, + )); } - let output_end = std::cmp::min(output_base.as_usize() + size, buf_len); + let output_end = std::cmp::min(output_base_umem + size, buf_len); new_map.push(base, unsafe { std::slice::from_raw_parts_mut( - buf_ptr.add(output_base.as_usize()), - output_end - output_base.as_usize(), + buf_ptr.add(output_base_umem.try_into().unwrap()), + (output_end - output_base_umem).try_into().unwrap(), ) }); } @@ -119,4 +128,4 @@ impl<'a> MMAPInfoMut<'a> { } } -pub type WriteMappedFilePhysicalMemory<'a> = MappedPhysicalMemory<&'a mut [u8], MMAPInfoMut<'a>>; +pub type WriteMappedFilePhysicalMemory<'a> = MappedPhysicalMemory<&'a mut [u8], MmapInfoMut<'a>>; diff --git a/apex_dma/memflow_lib/memflow/src/connector/inventory.rs b/apex_dma/memflow_lib/memflow/src/connector/inventory.rs deleted file mode 100644 index af83d51..0000000 --- 
a/apex_dma/memflow_lib/memflow/src/connector/inventory.rs +++ /dev/null @@ -1,421 +0,0 @@ -/*! -Connector inventory interface. -*/ - -use crate::error::{Error, Result}; -use crate::mem::{CloneablePhysicalMemory, PhysicalMemoryBox}; - -use super::ConnectorArgs; - -use std::fs::read_dir; -use std::path::{Path, PathBuf}; -use std::sync::Arc; - -use log::{debug, error, info, warn}; - -use libloading::Library; - -/// Exported memflow connector version -pub const MEMFLOW_CONNECTOR_VERSION: i32 = 5; - -/// Type of a single connector instance -pub type ConnectorType = PhysicalMemoryBox; - -/// Describes a connector -pub struct ConnectorDescriptor { - /// The connector inventory api version for when the connector was built. - /// This has to be set to `MEMFLOW_CONNECTOR_VERSION` of memflow. - /// - /// If the versions mismatch the inventory will refuse to load. - pub connector_version: i32, - - /// The name of the connector. - /// This name will be used when loading a connector from a connector inventory. - pub name: &'static str, - - /// The factory function for the connector. - /// Calling this function will produce new connector instances. - pub factory: extern "C" fn(args: &ConnectorArgs) -> Result, -} - -/// Holds an inventory of available connectors. -pub struct ConnectorInventory { - connectors: Vec, -} - -impl ConnectorInventory { - /// Creates a new inventory of connectors from the provided path. - /// The path has to be a valid directory or the function will fail with an `Error::IO` error. - /// - /// # Safety - /// - /// Loading third party libraries is inherently unsafe and the compiler - /// cannot guarantee that the implementation of the library - /// matches the one specified here. This is especially true if - /// the loaded library implements the necessary interface manually. 
- /// - /// # Examples - /// - /// Creating a inventory: - /// ``` - /// use memflow::connector::ConnectorInventory; - /// - /// let inventory = unsafe { - /// ConnectorInventory::scan_path("./") - /// }.unwrap(); - /// ``` - pub unsafe fn scan_path>(path: P) -> Result { - let mut dir = PathBuf::default(); - dir.push(path); - - let mut ret = Self { connectors: vec![] }; - ret.add_dir(dir)?; - Ok(ret) - } - - #[doc(hidden)] - #[deprecated] - pub unsafe fn with_path>(path: P) -> Result { - Self::scan_path(path) - } - - /// Creates a new inventory of connectors by searching various paths. - /// - /// It will query PATH, and an additional set of of directories (standard unix ones, if unix, - /// and "HOME/.local/lib" on all OSes) for "memflow" directory, and if there is one, then - /// search for libraries in there. - /// - /// # Safety - /// - /// Loading third party libraries is inherently unsafe and the compiler - /// cannot guarantee that the implementation of the library - /// matches the one specified here. This is especially true if - /// the loaded library implements the necessary interface manually. 
- /// - /// # Examples - /// - /// Creating an inventory: - /// ``` - /// use memflow::connector::ConnectorInventory; - /// - /// let inventory = unsafe { - /// ConnectorInventory::scan() - /// }; - /// ``` - pub unsafe fn scan() -> Self { - #[cfg(unix)] - let extra_paths: Vec<&str> = vec![ - "/opt", - "/lib", - "/usr/lib/", - "/usr/local/lib", - "/lib32", - "/lib64", - "/usr/lib32", - "/usr/lib64", - "/usr/local/lib32", - "/usr/local/lib64", - ]; - #[cfg(not(unix))] - let extra_paths: Vec<&str> = vec![]; - - let path_iter = extra_paths.into_iter().map(PathBuf::from); - - let path_var = std::env::var_os("PATH"); - let path_iter = path_iter.chain( - path_var - .as_ref() - .map(|p| std::env::split_paths(p)) - .into_iter() - .flatten(), - ); - - #[cfg(unix)] - let path_iter = path_iter.chain( - dirs::home_dir() - .map(|dir| dir.join(".local").join("lib")) - .into_iter(), - ); - - #[cfg(not(unix))] - let path_iter = path_iter.chain(dirs::document_dir().into_iter()); - - let mut ret = Self { connectors: vec![] }; - - for mut path in path_iter { - path.push("memflow"); - ret.add_dir(path).ok(); - } - - if let Ok(pwd) = std::env::current_dir() { - ret.add_dir(pwd).ok(); - } - - ret - } - - #[doc(hidden)] - #[deprecated] - pub unsafe fn try_new() -> Result { - Ok(Self::scan()) - } - - /// Adds a library directory to the inventory - /// - /// # Safety - /// - /// Same as previous functions - compiler can not guarantee the safety of - /// third party library implementations. - pub unsafe fn add_dir(&mut self, dir: PathBuf) -> Result<&mut Self> { - if !dir.is_dir() { - return Err(Error::IO("invalid path argument")); - } - - info!("scanning {:?} for connectors", dir); - - for entry in read_dir(dir).map_err(|_| Error::IO("unable to read directory"))? 
{ - let entry = entry.map_err(|_| Error::IO("unable to read directory entry"))?; - if let Ok(connector) = Connector::try_with(entry.path()) { - if self - .connectors - .iter() - .find(|c| connector.name == c.name) - .is_none() - { - info!("adding connector '{}': {:?}", connector.name, entry.path()); - self.connectors.push(connector); - } else { - debug!( - "skipping connector '{}' because it was added already: {:?}", - connector.name, - entry.path() - ); - } - } - } - - Ok(self) - } - - /// Returns the names of all currently available connectors that can be used - /// when calling `create_connector` or `create_connector_default`. - pub fn available_connectors(&self) -> Vec { - self.connectors - .iter() - .map(|c| c.name.clone()) - .collect::>() - } - - /// Tries to create a new connector instance for the connector with the given name. - /// The connector will be initialized with the args provided to this call. - /// - /// In case no connector could be found this will throw an `Error::Connector`. - /// - /// # Safety - /// - /// Loading third party libraries is inherently unsafe and the compiler - /// cannot guarantee that the implementation of the library - /// matches the one specified here. This is especially true if - /// the loaded library implements the necessary interface manually. - /// - /// It is adviced to use a proc macro for defining a connector. 
- /// - /// # Examples - /// - /// Creating a connector instance: - /// ```no_run - /// use memflow::connector::{ConnectorInventory, ConnectorArgs}; - /// - /// let inventory = unsafe { - /// ConnectorInventory::scan_path("./") - /// }.unwrap(); - /// let connector = unsafe { - /// inventory.create_connector("coredump", &ConnectorArgs::new()) - /// }.unwrap(); - /// ``` - /// - /// Defining a dynamically loaded connector: - /// ``` - /// use memflow::error::Result; - /// use memflow::types::size; - /// use memflow::mem::dummy::DummyMemory; - /// use memflow::connector::ConnectorArgs; - /// use memflow_derive::connector; - /// - /// #[connector(name = "dummy")] - /// pub fn create_connector(_args: &ConnectorArgs) -> Result { - /// Ok(DummyMemory::new(size::mb(16))) - /// } - /// ``` - pub unsafe fn create_connector( - &self, - name: &str, - args: &ConnectorArgs, - ) -> Result { - let connector = self - .connectors - .iter() - .find(|c| c.name == name) - .ok_or_else(|| { - error!( - "unable to find connector with name '{}'. available connectors are: {}", - name, - self.connectors - .iter() - .map(|c| c.name.clone()) - .collect::>() - .join(", ") - ); - Error::Connector("connector not found") - })?; - connector.create(args) - } - - /// Creates a connector in the same way `create_connector` does but without any arguments provided. - /// - /// # Safety - /// - /// See the above safety section. - /// This function essentially just wraps the above function. 
- /// - /// # Examples - /// - /// Creating a connector instance: - /// ```no_run - /// use memflow::connector::{ConnectorInventory, ConnectorArgs}; - /// - /// let inventory = unsafe { - /// ConnectorInventory::scan_path("./") - /// }.unwrap(); - /// let connector = unsafe { - /// inventory.create_connector_default("coredump") - /// }.unwrap(); - /// ``` - pub unsafe fn create_connector_default(&self, name: &str) -> Result { - self.create_connector(name, &ConnectorArgs::default()) - } -} - -/// Stores a connector library instance. -/// -/// # Examples -/// -/// Creating a connector instance: -/// ```no_run -/// use memflow::connector::{Connector, ConnectorArgs}; -/// -/// let connector_lib = unsafe { -/// Connector::try_with("./connector.so") -/// }.unwrap(); -/// -/// let connector = unsafe { -/// connector_lib.create(&ConnectorArgs::new()) -/// }.unwrap(); -/// ``` -#[derive(Clone)] -pub struct Connector { - _library: Arc, - name: String, - factory: extern "C" fn(args: &ConnectorArgs) -> Result, -} - -impl Connector { - /// Tries to initialize a connector from a `Path`. - /// The path must point to a valid dynamic library that implements - /// the memflow inventory interface. - /// - /// If the connector does not contain the necessary exports or the version does - /// not match the current api version this function will return an `Error::Connector`. - /// - /// # Safety - /// - /// Loading third party libraries is inherently unsafe and the compiler - /// cannot guarantee that the implementation of the library - /// matches the one specified here. This is especially true if - /// the loaded library implements the necessary interface manually. - pub unsafe fn try_with>(path: P) -> Result { - let library = - Library::new(path.as_ref()).map_err(|_| Error::Connector("unable to load library"))?; - - let desc = library - .get::<*mut ConnectorDescriptor>(b"MEMFLOW_CONNECTOR\0") - .map_err(|_| Error::Connector("connector descriptor not found"))? 
- .read(); - - if desc.connector_version != MEMFLOW_CONNECTOR_VERSION { - warn!( - "connector {:?} has a different version. version {} required, found {}.", - path.as_ref(), - MEMFLOW_CONNECTOR_VERSION, - desc.connector_version - ); - return Err(Error::Connector("connector version mismatch")); - } - - Ok(Self { - _library: Arc::new(library), - name: desc.name.to_string(), - factory: desc.factory, - }) - } - - /// Creates a new connector instance from this library. - /// The connector is initialized with the arguments provided to this function. - /// - /// # Safety - /// - /// Loading third party libraries is inherently unsafe and the compiler - /// cannot guarantee that the implementation of the library - /// matches the one specified here. This is especially true if - /// the loaded library implements the necessary interface manually. - /// - /// It is adviced to use a proc macro for defining a connector. - pub unsafe fn create(&self, args: &ConnectorArgs) -> Result { - let connector_res = (self.factory)(args); - - if let Err(err) = connector_res { - debug!("{}", err) - } - - // We do not want to return error with data from the shared library - // that may get unloaded before it gets displayed - let instance = connector_res?; - - Ok(ConnectorInstance { - _library: self._library.clone(), - instance, - }) - } -} - -/// Describes initialized connector instance -/// -/// This structure is returned by `Connector`. It is needed to maintain reference -/// counts to the loaded connector library. -#[derive(Clone)] -pub struct ConnectorInstance { - instance: ConnectorType, - - /// Internal library arc. - /// - /// This will keep the library loaded in memory as long as the connector instance is alive. - /// This has to be the last member of the struct so the library will be unloaded _after_ - /// the instance is destroyed. - /// - /// If the library is unloaded prior to the instance this will lead to a SIGSEGV. 
- _library: Arc, -} - -impl std::ops::Deref for ConnectorInstance { - type Target = dyn CloneablePhysicalMemory; - - fn deref(&self) -> &Self::Target { - &*self.instance - } -} - -impl std::ops::DerefMut for ConnectorInstance { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut *self.instance - } -} diff --git a/apex_dma/memflow_lib/memflow/src/connector/mmap.rs b/apex_dma/memflow_lib/memflow/src/connector/mmap.rs index a1bec74..b9e0e79 100644 --- a/apex_dma/memflow_lib/memflow/src/connector/mmap.rs +++ b/apex_dma/memflow_lib/memflow/src/connector/mmap.rs @@ -1,13 +1,14 @@ -/*! -Basic connector which works on mapped memory. -*/ - -use crate::error::{Error, Result}; -use crate::iter::FnExtend; +//! Basic connector which works on mapped memory. +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; use crate::mem::{ - MemoryMap, PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadData, PhysicalWriteData, + opt_call, MemoryMap, PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadMemOps, + PhysicalWriteMemOps, }; -use crate::types::Address; +use crate::types::{umem, Address}; + +use crate::cglue::*; + +use std::convert::TryInto; pub struct MappedPhysicalMemory { info: F, @@ -30,14 +31,17 @@ impl MappedPhysicalMemory<&'static mut [u8], MemoryMap<&'static mut [u8]>> { /// /// This connector assumes the memory map is valid, and writeable. Failure for these conditions /// to be met leads to undefined behaviour (most likely a segfault) when reading/writing. 
- pub unsafe fn from_addrmap_mut(map: MemoryMap<(Address, usize)>) -> Self { + pub unsafe fn from_addrmap_mut(map: MemoryMap<(Address, umem)>) -> Self { let mut ret_map = MemoryMap::new(); map.into_iter() .map(|(base, (real_base, size))| { ( base, - std::slice::from_raw_parts_mut(real_base.as_u64() as _, size), + std::slice::from_raw_parts_mut( + real_base.to_umem() as _, + size.try_into().unwrap(), + ), ) }) .for_each(|(base, buf)| { @@ -55,14 +59,14 @@ impl MappedPhysicalMemory<&'static [u8], MemoryMap<&'static [u8]>> { /// /// This connector assumes the memory map is valid. Failure for this condition to be met leads /// to undefined behaviour (most likely a segfault) when reading. - pub unsafe fn from_addrmap(map: MemoryMap<(Address, usize)>) -> Self { + pub unsafe fn from_addrmap(map: MemoryMap<(Address, umem)>) -> Self { let mut ret_map = MemoryMap::new(); map.into_iter() .map(|(base, (real_base, size))| { ( base, - std::slice::from_raw_parts(real_base.as_u64() as _, size), + std::slice::from_raw_parts(real_base.to_umem() as _, size.try_into().unwrap()), ) }) .for_each(|(base, buf)| { @@ -83,78 +87,105 @@ impl, F: AsRef>> MappedPhysicalMemory { } } +#[allow(clippy::needless_option_as_deref)] impl<'a, F: AsRef> + Send> PhysicalMemory for MappedPhysicalMemory<&'a mut [u8], F> { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - let mut void = FnExtend::void(); - for (mapped_buf, buf) in self.info.as_ref().map_iter( - data.iter_mut() - .map(|PhysicalReadData(addr, buf)| (*addr, &mut **buf)), - &mut void, - ) { + fn phys_read_raw_iter(&mut self, mut data: PhysicalReadMemOps) -> Result<()> { + for CTup3(mapped_buf, meta_addr, mut buf) in + self.info.as_ref().map_iter(data.inp, data.out_fail) + { buf.copy_from_slice(mapped_buf.as_ref()); + opt_call(data.out.as_deref_mut(), CTup2(meta_addr, buf)); } Ok(()) } - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - let mut void = FnExtend::void(); - - for 
(mapped_buf, buf) in self - .info - .as_ref() - .map_iter(data.iter().copied().map(<_>::from), &mut void) + fn phys_write_raw_iter(&mut self, mut data: PhysicalWriteMemOps) -> Result<()> { + for CTup3(mapped_buf, meta_addr, buf) in + self.info.as_ref().map_iter(data.inp, data.out_fail) { - mapped_buf.as_mut().copy_from_slice(buf); + mapped_buf.as_mut().copy_from_slice(buf.into()); + opt_call(data.out.as_deref_mut(), CTup2(meta_addr, buf)); } Ok(()) } fn metadata(&self) -> PhysicalMemoryMetadata { + let max_address = self + .info + .as_ref() + .iter() + .last() + .map(|map| map.base().to_umem() + map.output().len() as umem) + .unwrap() + - 1; + let real_size = self + .info + .as_ref() + .iter() + .fold(0, |s, m| s + m.output().len() as umem); PhysicalMemoryMetadata { - size: self - .info - .as_ref() - .iter() - .last() - .map(|map| map.base().as_usize() + map.output().len()) - .unwrap(), + max_address: max_address.into(), + real_size, readonly: false, + ideal_batch_size: u32::MAX, } } } +#[allow(clippy::needless_option_as_deref)] impl<'a, F: AsRef> + Send> PhysicalMemory for MappedPhysicalMemory<&'a [u8], F> { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - let mut void = FnExtend::void(); - for (mapped_buf, buf) in self.info.as_ref().map_iter( - data.iter_mut() - .map(|PhysicalReadData(addr, buf)| (*addr, &mut **buf)), - &mut void, - ) { + fn phys_read_raw_iter(&mut self, mut data: PhysicalReadMemOps) -> Result<()> { + for CTup3(mapped_buf, meta_addr, mut buf) in + self.info.as_ref().map_iter(data.inp, data.out_fail) + { buf.copy_from_slice(mapped_buf.as_ref()); + opt_call(data.out.as_deref_mut(), CTup2(meta_addr, buf)); } Ok(()) } - fn phys_write_raw_list(&mut self, _data: &[PhysicalWriteData]) -> Result<()> { - Err(Error::Connector("Target mapping is not writeable")) + fn phys_write_raw_iter(&mut self, _data: PhysicalWriteMemOps) -> Result<()> { + Err(Error(ErrorOrigin::Connector, ErrorKind::ReadOnly) + .log_error("target 
mapping is not writeable")) } fn metadata(&self) -> PhysicalMemoryMetadata { + let max_address = self + .info + .as_ref() + .iter() + .last() + .map(|map| map.base().to_umem() + map.output().len() as umem) + .unwrap() + - 1; + let real_size = self + .info + .as_ref() + .iter() + .fold(0, |s, m| s + m.output().len() as umem); PhysicalMemoryMetadata { - size: self - .info - .as_ref() - .iter() - .last() - .map(|map| map.base().as_usize() + map.output().len()) - .unwrap(), + max_address: max_address.into(), + real_size, readonly: true, + ideal_batch_size: u32::MAX, } } } + +#[cfg(feature = "plugins")] +cglue_impl_group!( + MappedPhysicalMemory>>, + crate::plugins::ConnectorInstance, + {} +); +#[cfg(feature = "plugins")] +cglue_impl_group!( + MappedPhysicalMemory>>, + crate::plugins::ConnectorInstance, + {} +); diff --git a/apex_dma/memflow_lib/memflow/src/connector/mod.rs b/apex_dma/memflow_lib/memflow/src/connector/mod.rs index 01f793b..496c20f 100644 --- a/apex_dma/memflow_lib/memflow/src/connector/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/connector/mod.rs @@ -9,32 +9,27 @@ This module also contains functions to interface with dynamically loaded connect The inventory system is feature gated behind the `inventory` feature. 
*/ -pub mod args; -#[doc(hidden)] -pub use args::ConnectorArgs; - -#[cfg(feature = "inventory")] -pub mod inventory; -#[doc(hidden)] -#[cfg(feature = "inventory")] -pub use inventory::{ - Connector, ConnectorDescriptor, ConnectorInstance, ConnectorInventory, ConnectorType, - MEMFLOW_CONNECTOR_VERSION, -}; - +// TODO: move all of this in a helper module and only keep the connector plugin stuff #[cfg(feature = "std")] pub mod fileio; #[doc(hidden)] #[cfg(feature = "std")] -pub use fileio::FileIOMemory; +pub use fileio::{CloneFile, FileIoMemory}; #[cfg(feature = "filemap")] pub mod filemap; #[cfg(feature = "filemap")] pub use filemap::{ - MMAPInfo, MMAPInfoMut, ReadMappedFilePhysicalMemory, WriteMappedFilePhysicalMemory, + MmapInfo, MmapInfoMut, ReadMappedFilePhysicalMemory, WriteMappedFilePhysicalMemory, }; pub mod mmap; #[doc(hidden)] pub use mmap::MappedPhysicalMemory; + +pub mod cpu_state; +#[doc(hidden)] +pub use cpu_state::{ConnectorCpuState, CpuState}; +#[doc(hidden)] +#[cfg(feature = "plugins")] +pub use cpu_state::{CpuStateArcBox, IntoCpuStateArcBox}; diff --git a/apex_dma/memflow_lib/memflow/src/dummy/mem.rs b/apex_dma/memflow_lib/memflow/src/dummy/mem.rs new file mode 100644 index 0000000..a9d2409 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/dummy/mem.rs @@ -0,0 +1,125 @@ +use crate::cglue::*; +use crate::connector::MappedPhysicalMemory; +use crate::derive::connector; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; +use crate::mem::mem_data::*; +use crate::mem::{MemoryMap, PhysicalMemory, PhysicalMemoryMapping, PhysicalMemoryMetadata}; +use crate::plugins::*; +use crate::types::{size, umem, Address}; + +cglue_impl_group!(DummyMemory, ConnectorInstance, {}); + +#[derive(Copy, Clone)] +#[repr(C, align(0x1000))] +struct AlignedPage([u8; 0x1000]); + +pub struct DummyMemory { + buf: Box<[AlignedPage]>, + mem: MappedPhysicalMemory<&'static mut [u8], MemoryMap<&'static mut [u8]>>, +} + +impl DummyMemory { + /// Creates a new DummyMemory object 
with the given size + /// + /// # Remarks: + /// + /// If the provided size is not aligned to 0x1000 bytes DummyMemory will over-allocate to enforce the alignment. + pub fn new(size: usize) -> Self { + let pages = (size / 0x1000) + (size % 0x1000).min(1); + let buf = vec![AlignedPage([0_u8; 0x1000]); pages].into_boxed_slice(); + + let mut map = MemoryMap::new(); + map.push_range( + Address::null(), + (buf.len() * 0x1000).into(), + (buf.as_ptr() as umem).into(), + ); + + let buf_mem = unsafe { MappedPhysicalMemory::from_addrmap_mut(map) }; + + Self { buf, mem: buf_mem } + } + + pub(crate) fn buf_ptr(&self) -> *const u8 { + self.buf.as_ptr().cast::() + } +} + +impl Clone for DummyMemory { + fn clone(&self) -> Self { + let mut map = MemoryMap::new(); + map.push_range( + Address::null(), + (self.buf.len() * 0x1000).into(), + (self.buf.as_ptr() as usize).into(), + ); + + let mem = unsafe { MappedPhysicalMemory::from_addrmap_mut(map) }; + + Self { + buf: self.buf.clone(), + mem, + } + } +} + +impl PhysicalMemory for DummyMemory { + #[inline] + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { + self.mem.phys_read_raw_iter(data) + } + + #[inline] + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()> { + self.mem.phys_write_raw_iter(data) + } + + #[inline] + fn metadata(&self) -> PhysicalMemoryMetadata { + self.mem.metadata() + } + + #[inline] + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.mem.set_mem_map(mem_map) + } +} + +pub fn parse_size(args: &Args) -> Result { + let (size, size_mul) = { + let size = args.get("size").unwrap_or("2m"); + + let mul_arr = &[ + (size::kb(1), ["kb", "k"]), + (size::mb(1), ["mb", "m"]), + (size::gb(1), ["gb", "g"]), + ]; + + mul_arr + .iter() + .flat_map(|(m, e)| e.iter().map(move |e| (*m, e))) + .filter_map(|(m, e)| { + if size.to_lowercase().ends_with(e) { + Some((size.trim_end_matches(e), m)) + } else { + None + } + }) + .next() + .ok_or(Error( + 
ErrorOrigin::Connector, + ErrorKind::InvalidMemorySizeUnit, + ))? + }; + + let size = usize::from_str_radix(size, 16) + .map_err(|_| Error(ErrorOrigin::Connector, ErrorKind::InvalidMemorySize))?; + + Ok(size * size_mul) +} + +#[connector(name = "dummy")] +pub fn create_connector(args: &ConnectorArgs) -> Result { + let size = parse_size(&args.extra_args)?; + Ok(DummyMemory::new(size)) +} diff --git a/apex_dma/memflow_lib/memflow/src/dummy/mod.rs b/apex_dma/memflow_lib/memflow/src/dummy/mod.rs new file mode 100644 index 0000000..7b9595a --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/dummy/mod.rs @@ -0,0 +1,10 @@ +pub mod mem; +pub mod os; +pub mod process; + +pub(crate) mod offset_pt; +pub(crate) use offset_pt::OffsetPageTable; + +pub use mem::DummyMemory; +pub use os::DummyOs; +pub use process::DummyProcessInfo; diff --git a/apex_dma/memflow_lib/memflow/src/dummy/offset_pt.rs b/apex_dma/memflow_lib/memflow/src/dummy/offset_pt.rs new file mode 100644 index 0000000..d9a2b17 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/dummy/offset_pt.rs @@ -0,0 +1,261 @@ +use x86_64::structures::paging::{ + frame::PhysFrame, mapper::*, page_table::PageTable, Page, PageTableFlags, *, +}; + +use x86_64::VirtAddr; + +/// A Mapper implementation that requires that the complete physically memory is mapped at some +/// offset in the virtual address space. +#[derive(Debug)] +pub struct OffsetPageTable<'a> { + inner: MappedPageTable<'a, PhysOffset>, +} + +impl<'a> OffsetPageTable<'a> { + /// Creates a new `OffsetPageTable` that uses the given offset for converting virtual + /// to physical addresses. + /// + /// The complete physical memory must be mapped in the virtual address space starting at + /// address `phys_offset`. This means that for example physical address `0x5000` can be + /// accessed through virtual address `phys_offset + 0x5000`. 
This mapping is required because + /// the mapper needs to access page tables, which are not mapped into the virtual address + /// space by default. + /// + /// ## Safety + /// + /// This function is unsafe because the caller must guarantee that the passed `phys_offset` + /// is correct. Also, the passed `level_4_table` must point to the level 4 page table + /// of a valid page table hierarchy. Otherwise this function might break memory safety, e.g. + /// by writing to an illegal memory location. + #[inline] + pub unsafe fn new(level_4_table: &'a mut PageTable, phys_offset: VirtAddr) -> Self { + let phys_offset = PhysOffset { + offset: phys_offset, + }; + Self { + inner: MappedPageTable::new(level_4_table, phys_offset), + } + } +} + +#[derive(Debug)] +struct PhysOffset { + offset: VirtAddr, +} + +unsafe impl PageTableFrameMapping for PhysOffset { + fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable { + let virt = self.offset + frame.start_address().as_u64(); + virt.as_u64() as *mut _ + } +} + +// delegate all trait implementations to inner + +impl<'a> Mapper for OffsetPageTable<'a> { + #[inline] + unsafe fn map_to_with_table_flags( + &mut self, + page: Page, + frame: PhysFrame, + flags: PageTableFlags, + parent_table_flags: PageTableFlags, + allocator: &mut A, + ) -> Result, MapToError> + where + A: FrameAllocator + ?Sized, + { + self.inner + .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator) + } + + #[inline] + fn unmap( + &mut self, + page: Page, + ) -> Result<(PhysFrame, MapperFlush), UnmapError> { + self.inner.unmap(page) + } + + #[inline] + unsafe fn update_flags( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result, FlagUpdateError> { + self.inner.update_flags(page, flags) + } + + #[inline] + unsafe fn set_flags_p4_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p4_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p3_entry( + &mut self, + page: Page, + 
flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p3_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p2_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p2_entry(page, flags) + } + + #[inline] + fn translate_page(&self, page: Page) -> Result, TranslateError> { + self.inner.translate_page(page) + } +} + +impl<'a> Mapper for OffsetPageTable<'a> { + #[inline] + unsafe fn map_to_with_table_flags( + &mut self, + page: Page, + frame: PhysFrame, + flags: PageTableFlags, + parent_table_flags: PageTableFlags, + allocator: &mut A, + ) -> Result, MapToError> + where + A: FrameAllocator + ?Sized, + { + self.inner + .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator) + } + + #[inline] + fn unmap( + &mut self, + page: Page, + ) -> Result<(PhysFrame, MapperFlush), UnmapError> { + self.inner.unmap(page) + } + + #[inline] + unsafe fn update_flags( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result, FlagUpdateError> { + self.inner.update_flags(page, flags) + } + + #[inline] + unsafe fn set_flags_p4_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p4_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p3_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p3_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p2_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p2_entry(page, flags) + } + + #[inline] + fn translate_page(&self, page: Page) -> Result, TranslateError> { + self.inner.translate_page(page) + } +} + +impl<'a> Mapper for OffsetPageTable<'a> { + #[inline] + unsafe fn map_to_with_table_flags( + &mut self, + page: Page, + frame: PhysFrame, + flags: PageTableFlags, + parent_table_flags: PageTableFlags, + allocator: &mut A, + ) -> Result, MapToError> + where + A: FrameAllocator + ?Sized, + { + self.inner + 
.map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator) + } + + #[inline] + fn unmap( + &mut self, + page: Page, + ) -> Result<(PhysFrame, MapperFlush), UnmapError> { + self.inner.unmap(page) + } + + #[inline] + unsafe fn update_flags( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result, FlagUpdateError> { + self.inner.update_flags(page, flags) + } + + #[inline] + unsafe fn set_flags_p4_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p4_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p3_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p3_entry(page, flags) + } + + #[inline] + unsafe fn set_flags_p2_entry( + &mut self, + page: Page, + flags: PageTableFlags, + ) -> Result { + self.inner.set_flags_p2_entry(page, flags) + } + + #[inline] + fn translate_page(&self, page: Page) -> Result, TranslateError> { + self.inner.translate_page(page) + } +} + +impl<'a> Translate for OffsetPageTable<'a> { + #[inline] + fn translate(&self, addr: VirtAddr) -> TranslateResult { + self.inner.translate(addr) + } +} diff --git a/apex_dma/memflow_lib/memflow/src/dummy/os.rs b/apex_dma/memflow_lib/memflow/src/dummy/os.rs new file mode 100644 index 0000000..ee3678d --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/dummy/os.rs @@ -0,0 +1,648 @@ +use super::mem::*; +use super::process::*; + +use crate::architecture::ArchitectureIdent; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; +use crate::mem::{phys_mem::*, virt_mem::*, *}; +use crate::os::{process::*, root::*, *}; +use crate::plugins::{self, *}; +use crate::types::{clamp_to_usize, imem, mem, size, umem, Address}; + +use crate::cglue::*; +use rand::seq::SliceRandom; +use rand::{Rng, SeedableRng}; +use rand_xorshift::XorShiftRng; +use std::collections::VecDeque; +use std::convert::TryInto; + +use crate::architecture::x86::{x64, X86VirtualTranslate}; + +use x86_64::{ + 
structures::paging, + structures::paging::{ + mapper::Mapper, + page::{PageSize, Size1GiB, Size2MiB, Size4KiB}, + page_table::{PageTable, PageTableFlags}, + FrameAllocator, PhysFrame, Translate, + }, + PhysAddr, VirtAddr, +}; + +use super::OffsetPageTable; + +#[derive(Clone, Copy, Debug)] +enum X64PageSize { + P4k = 0, + P2m = 1, + P1g = 2, +} + +impl X64PageSize { + fn to_size(self) -> usize { + match self { + X64PageSize::P4k => size::kb(4), + X64PageSize::P2m => size::mb(2), + X64PageSize::P1g => size::gb(1), + } + } + + fn to_idx(self) -> usize { + match self { + X64PageSize::P4k => 0, + X64PageSize::P2m => 1, + X64PageSize::P1g => 2, + } + } + + fn from_idx(idx: usize) -> Self { + match idx { + 2 => X64PageSize::P1g, + 1 => X64PageSize::P2m, + _ => X64PageSize::P4k, + } + } +} +#[derive(Clone, Copy, Debug)] +struct PageInfo { + addr: Address, + size: X64PageSize, +} + +impl PageInfo { + fn split_to_size(&self, new_size: X64PageSize) -> Vec { + let mut ret = vec![]; + for o in 0..(self.size.to_size() / new_size.to_size()) { + ret.push(PageInfo { + addr: self.addr + new_size.to_size() as umem * o as umem, + size: new_size, + }); + } + ret + } + + fn split_down(&self) -> Vec { + self.split_to_size(X64PageSize::from_idx(self.size.to_idx() - 1)) + } +} + +cglue_impl_group!(DummyOs, OsInstance, PhysicalMemory); + +pub struct DummyOs { + mem: DummyMemory, + page_list: VecDeque, + pt_pages: Vec, + last_pid: Pid, + rng: XorShiftRng, + processes: Vec, + info: OsInfo, +} + +impl Clone for DummyOs { + fn clone(&self) -> Self { + Self { + mem: self.mem.clone(), + page_list: VecDeque::new(), + pt_pages: vec![], + last_pid: self.last_pid, + rng: self.rng.clone(), + processes: self.processes.clone(), + info: self.info.clone(), + } + } +} + +impl AsMut for DummyOs { + fn as_mut(&mut self) -> &mut DummyMemory { + &mut self.mem + } +} + +unsafe impl FrameAllocator for DummyOs +where + S: PageSize, +{ + fn allocate_frame(&mut self) -> Option> { + let new_page = 
self.alloc_pt_page(); + match PhysFrame::from_start_address(PhysAddr::new(new_page.addr.to_umem() as u64)) { + Ok(s) => Some(s), + _ => None, + } + } +} + +impl DummyOs { + pub fn new_and_dtb( + mem: DummyMemory, + virt_size: usize, + buffer: &[u8], + ) -> (Self, Address, Address) { + let mut ret = Self::new(mem); + let (dtb, virt_base) = ret.alloc_dtb(virt_size, buffer); + (ret, dtb, virt_base) + } + + pub fn into_inner(self) -> DummyMemory { + self.mem + } + + pub fn quick_process(virt_size: usize, buffer: &[u8]) -> ::IntoProcessType { + let mem = DummyMemory::new(virt_size + size::mb(2)); + let mut os = Self::new(mem); + let pid = os.alloc_process(virt_size, buffer); + os.into_process_by_pid(pid).unwrap() + } + + /// Creates a new DummyOs object with a fixed default seed + /// + /// Note: + /// + /// Using a fixed seed for the rng will provide reproducability throughout test cases. + pub fn new(mem: DummyMemory) -> Self { + Self::with_seed(mem, 1) + } + + /// Creates a new DummyOs object with the given seed as a starting value for the RNG + pub fn with_seed(mem: DummyMemory, seed: u64) -> Self { + Self::with_rng(mem, SeedableRng::seed_from_u64(seed)) + } + + /// Creates a new DummyOs object with the given RNG. + /// + /// Note: + /// + /// The RNG has to be of type `XorShiftRng`. 
+ pub fn with_rng(mem: DummyMemory, mut rng: XorShiftRng) -> Self { + let mut page_prelist = vec![]; + + let mut i = Address::null(); + let size_addr = mem.metadata().max_address + 1_usize; + + while i < size_addr { + if let Some(page_info) = { + if size_addr - i >= X64PageSize::P1g.to_size() as imem { + Some(PageInfo { + addr: i, + size: X64PageSize::P1g, + }) + } else if size_addr - i >= X64PageSize::P2m.to_size() as imem { + Some(PageInfo { + addr: i, + size: X64PageSize::P2m, + }) + } else if size_addr - i >= X64PageSize::P4k.to_size() as imem { + Some(PageInfo { + addr: i, + size: X64PageSize::P4k, + }) + } else { + None + } + } { + i += page_info.size.to_size(); + page_prelist.push(page_info); + } else { + break; + } + } + + let mut page_list: Vec = vec![]; + + let mut split = [2, 0, 0].to_vec(); + + for _ in 0..2 { + page_prelist.shuffle(&mut rng); + for i in page_prelist { + let mut list = if split[i.size.to_idx()] == 0 + || (split[i.size.to_idx()] != 2 && rng.gen::()) + { + split[i.size.to_idx()] = std::cmp::max(split[i.size.to_idx()], 1); + i.split_down() + } else { + [i].to_vec() + }; + + list.shuffle(&mut rng); + + for o in list { + page_list.push(o); + } + } + + page_prelist = page_list.clone(); + } + + Self { + mem, + page_list: page_list.into(), + pt_pages: vec![], + last_pid: 0, + rng, + processes: vec![], + info: OsInfo { + base: Address::INVALID, + size: 0, + arch: ArchitectureIdent::X86(64, false), + }, + } + } + + pub fn vtop(&mut self, dtb_base: Address, virt_addr: Address) -> Option
{ + let pml4 = unsafe { + &mut *(self + .mem + .buf_ptr() + .add(dtb_base.to_umem().try_into().unwrap()) + .cast::() as *mut _) + }; + + let pt_mapper = + unsafe { OffsetPageTable::new(pml4, VirtAddr::from_ptr(self.mem.buf_ptr())) }; + + pt_mapper + .translate_addr(VirtAddr::new(virt_addr.to_umem() as u64)) + .map(|addr| addr.as_u64().into()) + } + + fn internal_alloc_process(&mut self, map_size: usize, test_buf: &[u8]) -> DummyProcessInfo { + let (dtb, address) = self.alloc_dtb(map_size, test_buf); + + self.last_pid += 1; + + DummyProcessInfo { + info: ProcessInfo { + address, + pid: self.last_pid, + state: ProcessState::Alive, + name: "Dummy".into(), + path: "/some/dummy".into(), + command_line: "/some/dummy --dummyarg".into(), + sys_arch: x64::ARCH.ident(), + proc_arch: x64::ARCH.ident(), + dtb1: dtb, + dtb2: Address::invalid(), + }, + dtb, + map_size, + modules: vec![], + } + } + + pub fn alloc_process(&mut self, map_size: usize, test_buf: &[u8]) -> Pid { + let proc = self.internal_alloc_process(map_size, test_buf); + + let ret = proc.info.pid; + + self.processes.push(proc); + + ret + } + + pub fn alloc_process_with_module(&mut self, map_size: usize, test_buf: &[u8]) -> Pid { + let mut proc = self.internal_alloc_process(map_size, test_buf); + + let ret = proc.info.pid; + + proc.add_modules(1, map_size / 2); + + self.processes.push(proc); + + ret + } + + pub fn alloc_dtb(&mut self, map_size: usize, test_buf: &[u8]) -> (Address, Address) { + let virt_base = (Address::null() + + self + .rng + .gen_range(0x0001_0000_0000_u64..((!0_u64) << 20) >> 20)) + .as_mem_aligned(mem::gb(2)); + + ( + self.alloc_dtb_const_base(virt_base, map_size, test_buf), + virt_base, + ) + } + + pub fn process_alloc_random_mem(&mut self, proc: &DummyProcessInfo, cnt: usize, size: usize) { + for _ in 0..cnt { + let virt_base = (Address::null() + + self + .rng + .gen_range(0x0001_0000_0000_u64..((!0_u64) << 20) >> 20)) + .as_mem_aligned(mem::gb(2)); + + self.alloc_mem_to_dtb(proc.dtb, 
virt_base, size, &[]); + } + } + + pub fn alloc_dtb_const_base( + &mut self, + virt_base: Address, + map_size: usize, + test_buf: &[u8], + ) -> Address { + let dtb = self.alloc_pt_page().addr; + + unsafe { + *(self + .mem + .buf_ptr() + .add(clamp_to_usize(dtb.to_umem())) + .cast::() as *mut _) = PageTable::new() + }; + + self.alloc_mem_to_dtb(dtb, virt_base, map_size, test_buf) + } + + pub fn alloc_mem_to_dtb( + &mut self, + dtb: Address, + virt_base: Address, + map_size: usize, + test_buf: &[u8], + ) -> Address { + let mut cur_len = 0; + + let pml4 = unsafe { + &mut *(self + .mem + .buf_ptr() + .add(clamp_to_usize(dtb.to_umem())) + .cast::() as *mut _) + }; + + let mut pt_mapper = + unsafe { OffsetPageTable::new(pml4, VirtAddr::from_ptr(self.mem.buf_ptr())) }; + + while cur_len < map_size { + let page_info = self.next_page_for_address(cur_len.into()); + let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + + if test_buf.len() >= (cur_len + page_info.size.to_size() as usize) { + self.mem + .phys_write( + page_info.addr.into(), + &test_buf[cur_len..(cur_len + page_info.size.to_size() as usize)], + ) + .unwrap(); + } else if test_buf.len() > cur_len { + self.mem + .phys_write(page_info.addr.into(), &test_buf[cur_len..]) + .unwrap(); + } + + unsafe { + match page_info.size { + X64PageSize::P1g => pt_mapper + .map_to( + paging::page::Page::::from_start_address_unchecked( + VirtAddr::new((virt_base + cur_len).to_umem() as u64), + ), + PhysFrame::from_start_address_unchecked(PhysAddr::new( + page_info.addr.to_umem() as u64, + )), + flags | PageTableFlags::HUGE_PAGE, + self, + ) + .is_ok(), + X64PageSize::P2m => pt_mapper + .map_to( + paging::page::Page::::from_start_address_unchecked( + VirtAddr::new((virt_base + cur_len).to_umem() as u64), + ), + PhysFrame::from_start_address_unchecked(PhysAddr::new( + page_info.addr.to_umem() as u64, + )), + flags | PageTableFlags::HUGE_PAGE, + self, + ) + .is_ok(), + X64PageSize::P4k => pt_mapper + .map_to( + 
paging::page::Page::::from_start_address_unchecked( + VirtAddr::new((virt_base + cur_len).to_umem() as u64), + ), + PhysFrame::from_start_address_unchecked(PhysAddr::new( + page_info.addr.to_umem() as u64, + )), + flags, + self, + ) + .is_ok(), + }; + } + cur_len += page_info.size.to_size(); + } + + dtb + } + + //Given it's the tests, we will have a panic if out of mem + fn alloc_pt_page(&mut self) -> PageInfo { + if let Some(page) = self.pt_pages.pop() { + page + } else { + self.pt_pages = self + .page_list + .pop_front() + .unwrap() + .split_to_size(X64PageSize::P4k); + self.pt_pages.pop().unwrap() + } + } + + fn next_page_for_address(&mut self, _addr: Address) -> PageInfo { + self.alloc_pt_page() + } +} + +pub type DummyVirtMem = VirtualDma; + +impl Os for DummyOs { + type ProcessType<'a> = DummyProcess>>; + type IntoProcessType = DummyProcess>; + + /// Walks a process list and calls a callback for each process structure address + /// + /// The callback is fully opaque. We need this style so that C FFI can work seamlessly. + fn process_address_list_callback(&mut self, mut callback: AddressCallback) -> Result<()> { + self.processes + .iter() + .take_while(|p| callback.call(p.info.address)) + .for_each(|_| {}); + + Ok(()) + } + + /// Find process information by its internal address + fn process_info_by_address(&mut self, address: Address) -> Result { + self.processes + .iter() + .find(|p| p.info.address == address) + .ok_or(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound)) + .map(|p| p.info.clone()) + } + + /// Creates a process by its internal address + /// + /// It will share the underlying memory resources + fn process_by_info(&mut self, info: ProcessInfo) -> Result> { + let proc = self + .processes + .iter() + .find(|p| p.info.address == info.address) + .ok_or(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidProcessInfo))? 
+ .clone(); + Ok(DummyProcess { + mem: VirtualDma::new( + self.mem.forward_mut(), + x64::ARCH, + x64::new_translator(proc.dtb), + ), + proc, + }) + } + + /// Creates a process by its internal address + /// + /// It will consume the kernel and not affect memory usage + /// + /// If no process with the specified address can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + fn into_process_by_info(self, info: ProcessInfo) -> Result { + let proc = self + .processes + .iter() + .find(|p| p.info.address == info.address) + .ok_or(Error(ErrorOrigin::OsLayer, ErrorKind::InvalidProcessInfo))? + .clone(); + Ok(DummyProcess { + mem: VirtualDma::new(self.mem, x64::ARCH, x64::new_translator(proc.dtb)), + proc, + }) + } + + /// Walks the kernel module list and calls the provided callback for each module structure + /// address + /// + /// # Arguments + /// * `callback` - where to pass each matching module to. This is an opaque callback. + fn module_address_list_callback(&mut self, _callback: AddressCallback) -> Result<()> { + Ok(()) + } + + /// Retrieves a module by its structure address + /// + /// # Arguments + /// * `address` - address where module's information resides in + fn module_by_address(&mut self, _address: Address) -> Result { + Err(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)) + } + + /// Retrieves address of the primary module structure of the process + /// + /// This will generally be for the initial executable that was run + fn primary_module_address(&mut self) -> Result
{ + Err(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)) + } + + /// Retrieves a list of all imports of a given module + fn module_import_list_callback( + &mut self, + _info: &ModuleInfo, + _callback: ImportCallback, + ) -> Result<()> { + Ok(()) + } + + /// Retrieves a list of all exports of a given module + fn module_export_list_callback( + &mut self, + _info: &ModuleInfo, + _callback: ExportCallback, + ) -> Result<()> { + Ok(()) + } + + /// Retrieves a list of all sections of a given module + fn module_section_list_callback( + &mut self, + _info: &ModuleInfo, + _callback: SectionCallback, + ) -> Result<()> { + Ok(()) + } + + /// Retrieves the kernel info + fn info(&self) -> &OsInfo { + &self.info + } +} + +impl PhysicalMemory for DummyOs { + #[inline] + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { + self.mem.phys_read_raw_iter(data) + } + + #[inline] + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()> { + self.mem.phys_write_raw_iter(data) + } + + #[inline] + fn metadata(&self) -> PhysicalMemoryMetadata { + self.mem.metadata() + } + + #[inline] + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.mem.set_mem_map(mem_map) + } +} + +#[doc(hidden)] +#[no_mangle] +pub static MEMFLOW_OS_DUMMY: OsDescriptor = OsDescriptor { + plugin_version: MEMFLOW_PLUGIN_VERSION, + accept_input: false, + input_layout: <::CInputArg as ::abi_stable::StableAbi>::LAYOUT, + output_layout: <::Instance as ::abi_stable::StableAbi>::LAYOUT, + name: CSliceRef::from_str("dummy"), + version: CSliceRef::from_str(env!("CARGO_PKG_VERSION")), + description: CSliceRef::from_str("Dummy testing OS"), + help_callback: None, // TODO: add dummy help string + target_list_callback: None, + create: mf_create, +}; + +#[doc(hidden)] +extern "C" fn mf_create( + args: Option<&OsArgs>, + _connector: COption, + lib: LibArc, + logger: Option<&'static PluginLogger>, + out: &mut MuOsInstanceArcBox<'static>, +) -> i32 { + 
plugins::wrap(args, lib, logger, out, create_dummy) +} + +pub fn create_dummy(args: &OsArgs, lib: LibArc) -> Result> { + let size = super::mem::parse_size(&args.extra_args)?; + let mem = DummyMemory::new(size); + let mut os = DummyOs::new(mem); + os.alloc_process_with_module( + std::cmp::min( + size::mb(2), + size.saturating_sub(size::mb(2)) + size::kb(512), + ), + &[], + ); + let os = CBox::from(os); + let obj = group_obj!((os, lib) as OsInstance); + Ok(obj) + // Err(Error( + // ErrorOrigin::Connector, + // ErrorKind::InvalidMemorySizeUnit, + // )) +} diff --git a/apex_dma/memflow_lib/memflow/src/dummy/process.rs b/apex_dma/memflow_lib/memflow/src/dummy/process.rs new file mode 100644 index 0000000..cfbd9d4 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/dummy/process.rs @@ -0,0 +1,215 @@ +use crate::architecture::x86::{x64, X86VirtualTranslate}; +use crate::error::*; + +use crate::architecture::ArchitectureIdent; +use crate::mem::{mem_data::*, memory_view::*, PhysicalMemory, VirtualDma, VirtualTranslate2}; +use crate::os::process::*; +use crate::os::*; +use crate::plugins::*; +use crate::types::{gap_remover::GapRemover, imem, umem, Address, PageType}; + +use crate::cglue::*; +use rand::{thread_rng, Rng}; + +#[derive(Clone)] +pub struct DummyProcessInfo { + pub info: ProcessInfo, + pub map_size: usize, + pub dtb: Address, + pub modules: Vec, +} + +impl DummyProcessInfo { + pub fn add_modules(&mut self, count: usize, min_size: usize) { + let base = self.info.address + + thread_rng().gen_range(0..((self.map_size.saturating_sub(min_size)) / 2)); + + for i in 0..count { + self.modules.push(ModuleInfo { + address: Address::from((i * 1024) as umem), + parent_process: Address::INVALID, + base, + size: (thread_rng().gen_range( + (min_size as umem) + ..(self.map_size as umem - (base - self.info.address) as umem), + )), + name: "dummy.so".into(), + path: "/".into(), + arch: x64::ARCH.ident(), + }); + } + } + + pub fn translator(&self) -> X86VirtualTranslate { + 
x64::new_translator(self.dtb) + } +} + +cglue_impl_group!(DummyProcess, ProcessInstance, {}); +cglue_impl_group!(DummyProcess, IntoProcessInstance, {}); + +#[derive(Clone)] +pub struct DummyProcess { + pub proc: DummyProcessInfo, + pub mem: T, +} + +impl Process + for DummyProcess> +{ + /// Retrieves virtual address translator for the process (if applicable) + //fn vat(&mut self) -> Option<&mut Self::VirtualTranslateType>; + + fn state(&mut self) -> ProcessState { + ProcessState::Alive + } + + fn set_dtb(&mut self, dtb1: Address, _dtb2: Address) -> Result<()> { + self.proc.dtb = dtb1; + self.mem.set_translator(self.proc.translator()); + Ok(()) + } + + /// Walks the process' module list and calls the provided callback for each module + fn module_address_list_callback( + &mut self, + target_arch: Option<&ArchitectureIdent>, + callback: ModuleAddressCallback, + ) -> Result<()> { + self.proc + .modules + .iter() + .filter_map(|m| { + if target_arch.is_none() || Some(&m.arch) == target_arch { + Some(ModuleAddressInfo { + address: m.address, + arch: m.arch, + }) + } else { + None + } + }) + .feed_into(callback); + Ok(()) + } + + /// Retrieves a module by its structure address and architecture + /// + /// # Arguments + /// * `address` - address where module's information resides in + /// * `architecture` - architecture of the module. Should be either `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch`. + fn module_by_address( + &mut self, + address: Address, + architecture: ArchitectureIdent, + ) -> Result { + self.proc + .modules + .iter() + .find(|m| m.address == address && m.arch == architecture) + .cloned() + .ok_or(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)) + } + + /// Retrieves address of the primary module structure of the process + fn primary_module_address(&mut self) -> Result
{ + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)); + let callback = &mut |moduleinfo: ModuleAddressInfo| { + ret = Ok(moduleinfo.address); + false + }; + let proc_arch = self.info().proc_arch; + self.module_address_list_callback(Some(&proc_arch), callback.into())?; + ret + } + + fn module_import_list_callback( + &mut self, + info: &ModuleInfo, + callback: ImportCallback, + ) -> Result<()> { + crate::os::util::module_import_list_callback(self, info, callback) + } + + fn module_export_list_callback( + &mut self, + info: &ModuleInfo, + callback: ExportCallback, + ) -> Result<()> { + crate::os::util::module_export_list_callback(self, info, callback) + } + + fn module_section_list_callback( + &mut self, + info: &ModuleInfo, + callback: SectionCallback, + ) -> Result<()> { + crate::os::util::module_section_list_callback(self, info, callback) + } + + /// Retrieves the process info + fn info(&self) -> &ProcessInfo { + &self.proc.info + } + + fn mapped_mem_range( + &mut self, + gap_size: imem, + start: Address, + end: Address, + out: MemoryRangeCallback, + ) { + GapRemover::new(out, gap_size, start, end).extend( + self.proc + .modules + .iter() + .map(|m| CTup3(m.base, m.size, PageType::UNKNOWN)), + ) + } +} + +impl MemoryView for DummyProcess { + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()> { + self.mem.read_raw_iter(data) + } + + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()> { + self.mem.write_raw_iter(data) + } + + fn metadata(&self) -> MemoryViewMetadata { + self.mem.metadata() + } +} + +#[cfg(test)] +mod tests { + use super::super::*; + use crate::cglue::*; + use crate::os::{Os, Process}; + use crate::plugins::ProcessInstance; + use crate::types::size; + + #[test] + pub fn primary_module() { + let mem = DummyMemory::new(size::mb(64)); + let mut os = DummyOs::new(mem); + + let pid = os.alloc_process(size::mb(60), &[]); + let mut prc = os.process_by_pid(pid).unwrap(); + prc.proc.add_modules(10, 
size::kb(1)); + + let module = prc.primary_module(); + assert!(module.is_ok()) + } + + #[test] + pub fn cglue_process() { + let mem = DummyMemory::new(size::mb(64)); + let mut os = DummyOs::new(mem); + + let pid = os.alloc_process(size::mb(60), &[]); + let prc = os.into_process_by_pid(pid).unwrap(); + let _obj = group_obj!(prc as ProcessInstance); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/error.rs b/apex_dma/memflow_lib/memflow/src/error.rs index 69bb149..3b0ec47 100644 --- a/apex_dma/memflow_lib/memflow/src/error.rs +++ b/apex_dma/memflow_lib/memflow/src/error.rs @@ -2,125 +2,307 @@ Specialized `Error` and `Result` types for memflow. */ +use std::num::NonZeroI32; use std::prelude::v1::*; -use std::{convert, fmt, result, str}; +use std::{fmt, result, str}; + +use log::{debug, error, info, trace, warn}; + +use crate::cglue::IntError; #[cfg(feature = "std")] use std::error; -/// Specialized `Error` type for memflow errors. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -pub enum Error { - /// Generic error type containing a string - Other(&'static str), - /// Partial error. - /// - /// Catch-all for partial errors which have been - /// converted into full errors. - Partial, - /// Out of bounds. - /// - /// Catch-all for bounds check errors. - Bounds, - /// IO error - /// - /// Catch-all for io related errors. - IO(&'static str), - /// Invalid Architecture error. - /// - /// The architecture provided is not a valid argument for the given function. - InvalidArchitecture, - /// Connector error - /// - /// Catch-all for connector related errors - Connector(&'static str), - /// Physical Read Error - /// - /// A read/write from/to the physical memory has failed. - PhysicalMemory(&'static str), - /// VirtualTranslate Error - /// - /// Error when trying to translate virtual to physical memory addresses. - VirtualTranslate, - /// Virtual Memory Error - /// - /// A read/write from/to the virtual memory has failed. 
- VirtualMemory(&'static str), - /// Encoding error. - /// - /// Catch-all for string related errors such as lacking a nul terminator. - Encoding, +pub struct Error(pub ErrorOrigin, pub ErrorKind); + +impl Error { + /// Returns a static string representing the type of error. + pub fn as_str(&self) -> &'static str { + self.1.to_str() + } + + /// Returns a static string representing the type of error. + pub fn into_str(self) -> &'static str { + self.as_str() + } + + pub fn log_error(self, err: impl std::fmt::Display) -> Self { + error!("{}: {} ({})", self.0.to_str(), self.1.to_str(), err); + self + } + + pub fn log_warn(self, err: impl std::fmt::Display) -> Self { + warn!("{}: {} ({})", self.0.to_str(), self.1.to_str(), err); + self + } + + pub fn log_info(self, err: impl std::fmt::Display) -> Self { + info!("{}: {} ({})", self.0.to_str(), self.1.to_str(), err); + self + } + + pub fn log_debug(self, err: impl std::fmt::Display) -> Self { + debug!("{}: {} ({})", self.0.to_str(), self.1.to_str(), err); + self + } + + pub fn log_trace(self, err: impl std::fmt::Display) -> Self { + trace!("{}: {} ({})", self.0.to_str(), self.1.to_str(), err); + self + } +} + +impl IntError for Error { + fn into_int_err(self) -> NonZeroI32 { + let origin = ((self.0 as i32 + 1) & 0xFFFi32) << 4; + let kind = ((self.1 as i32 + 1) & 0xFFFi32) << 16; + NonZeroI32::new(-(1 + origin + kind)).unwrap() + } + + fn from_int_err(err: NonZeroI32) -> Self { + let origin = ((-err.get() - 1) >> 4i32) & 0xFFFi32; + let kind = ((-err.get() - 1) >> 16i32) & 0xFFFi32; + + let error_origin = if origin > 0 && origin <= ErrorOrigin::Other as i32 + 1 { + unsafe { std::mem::transmute(origin as u16 - 1) } + } else { + ErrorOrigin::Other + }; + + let error_kind = if kind > 0 && kind <= ErrorKind::Unknown as i32 + 1 { + unsafe { std::mem::transmute(kind as u16 - 1) } + } else { + ErrorKind::Unknown + }; + + Self(error_origin, error_kind) + } } -/// Convert from &str to error -impl convert::From<&'static str> for 
Error { - fn from(error: &'static str) -> Self { - Error::Other(error) +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}: {}", self.0.to_str(), self.1.to_str()) } } -/// Convert from str::Utf8Error -impl From for Error { - fn from(_err: str::Utf8Error) -> Self { - Error::Encoding +#[cfg(feature = "std")] +impl error::Error for Error { + fn description(&self) -> &str { + self.as_str() } } /// Convert from PartialError impl From> for Error { - fn from(_err: PartialError) -> Self { - Error::Partial + fn from(err: PartialError) -> Self { + match err { + PartialError::Error(e) => e, + _ => Error(ErrorOrigin::Memory, ErrorKind::PartialData), + } } } -impl Error { - /// Returns a tuple representing the error description and its string value. - pub fn to_str_pair(self) -> (&'static str, Option<&'static str>) { - match self { - Error::Other(e) => ("other error", Some(e)), - Error::Partial => ("partial error", None), - Error::Bounds => ("out of bounds", None), - Error::IO(e) => ("io error", Some(e)), - Error::InvalidArchitecture => ("invalid architecture", None), - Error::Connector(e) => ("connector error", Some(e)), - Error::PhysicalMemory(e) => ("physical memory error", Some(e)), - Error::VirtualTranslate => ("virtual address translation failed", None), - Error::VirtualMemory(e) => ("virtual memory error", Some(e)), - Error::Encoding => ("encoding error", None), - } +impl From for Error { + fn from(origin: ErrorOrigin) -> Self { + Error(origin, ErrorKind::Unknown) } +} - /// Returns a simple string representation of the error. 
- pub fn to_str(self) -> &'static str { - self.to_str_pair().0 +impl From for Error { + fn from(kind: ErrorKind) -> Self { + Error(ErrorOrigin::Other, kind) } } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (desc, value) = self.to_str_pair(); +#[repr(u16)] +#[non_exhaustive] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum ErrorOrigin { + Pointer, - if let Some(value) = value { - write!(f, "{}: {}", desc, value) - } else { - f.write_str(desc) + Args, + ArgsValidator, + + Memory, + Mmu, + MemoryMap, + + PhysicalMemory, + VirtualTranslate, + Cache, + TlbCache, + PageCache, + VirtualMemory, + + Inventory, + Connector, + OsLayer, + Ffi, + + Other, +} + +impl ErrorOrigin { + /// Returns a static string representing the type of error. + pub fn to_str(self) -> &'static str { + match self { + ErrorOrigin::Pointer => "pointer", + + ErrorOrigin::Args => "args", + ErrorOrigin::ArgsValidator => "args validator", + + ErrorOrigin::Memory => "memory", + ErrorOrigin::Mmu => "mmu", + ErrorOrigin::MemoryMap => "memory map", + + ErrorOrigin::PhysicalMemory => "physical memory", + ErrorOrigin::VirtualTranslate => "virtual translate", + ErrorOrigin::Cache => "cache", + ErrorOrigin::TlbCache => "tlb cache", + ErrorOrigin::PageCache => "page cache", + ErrorOrigin::VirtualMemory => "virtual memory", + + ErrorOrigin::Inventory => "inventory", + ErrorOrigin::Connector => "connector", + ErrorOrigin::OsLayer => "oslayer", + ErrorOrigin::Ffi => "ffi", + + ErrorOrigin::Other => "other", } } } -#[cfg(feature = "std")] -impl error::Error for Error { - fn description(&self) -> &str { - self.to_str() +#[repr(u16)] +#[non_exhaustive] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum ErrorKind { + Uninitialized, + NotSupported, + NotImplemented, + Configuration, + Offset, + Http, + + ArgNotExists, + ArgValidation, + RequiredArgNotFound, + + InvalidArgument, + + PartialData, + + NotFound, + OutOfBounds, + OutOfMemoryRange, + 
Encoding, + + InvalidPath, + ReadOnly, + UnableToReadDir, + UnableToReadDirEntry, + UnableToReadFile, + UnableToCreateDirectory, + UnableToWriteFile, + UnableToSeekFile, + + UnableToMapFile, + MemoryMapOutOfRange, + UnableToReadMemory, + + InvalidArchitecture, + InvalidMemorySize, + InvalidMemorySizeUnit, + + UnableToLoadLibrary, + InvalidExeFile, + MemflowExportsNotFound, + VersionMismatch, + AlreadyExists, + PluginNotFound, + TargetNotFound, + InvalidAbi, + UnsupportedOptionalFeature, + + ProcessNotFound, + InvalidProcessInfo, + ModuleNotFound, + ExportNotFound, + ImportNotFound, + SectionNotFound, + + Unknown, +} + +impl ErrorKind { + /// Returns a static string representing the type of error. + pub fn to_str(self) -> &'static str { + match self { + ErrorKind::Uninitialized => "unitialized", + ErrorKind::NotSupported => "not supported", + ErrorKind::NotImplemented => "not implemented", + ErrorKind::Configuration => "configuration error", + ErrorKind::Offset => "offset error", + ErrorKind::Http => "http error", + + ErrorKind::ArgNotExists => "the given argument does not exist", + ErrorKind::ArgValidation => "the argument could not be validated", + ErrorKind::RequiredArgNotFound => "required argument is not set", + + ErrorKind::InvalidArgument => "invalid argument passed", + + ErrorKind::PartialData => "partial data", + + ErrorKind::NotFound => "not found", + ErrorKind::OutOfBounds => "out of bounds", + ErrorKind::OutOfMemoryRange => "out of memory range", + ErrorKind::Encoding => "encoding error", + + ErrorKind::InvalidPath => "invalid path", + ErrorKind::ReadOnly => "trying to write to a read only resource", + ErrorKind::UnableToReadDir => "unable to read directory", + ErrorKind::UnableToReadDirEntry => "unable to read directory entry", + ErrorKind::UnableToReadFile => "unable to read file", + ErrorKind::UnableToCreateDirectory => "unable to create directory", + ErrorKind::UnableToWriteFile => "unable to write file", + ErrorKind::UnableToSeekFile => "unable to 
seek file", + + ErrorKind::UnableToMapFile => "unable to map file", + ErrorKind::MemoryMapOutOfRange => "memory map is out of range", + ErrorKind::UnableToReadMemory => "unable to read memory", + + ErrorKind::InvalidArchitecture => "invalid architecture", + ErrorKind::InvalidMemorySize => "invalid memory size", + ErrorKind::InvalidMemorySizeUnit => "invalid memory size units (or none)", + + ErrorKind::UnableToLoadLibrary => "unable to load library", + ErrorKind::InvalidExeFile => "file is not a valid executable file", + ErrorKind::MemflowExportsNotFound => "file does not contain any memflow exports", + ErrorKind::VersionMismatch => "version mismatch", + ErrorKind::AlreadyExists => "already exists", + ErrorKind::PluginNotFound => "plugin not found", + ErrorKind::TargetNotFound => "specified (connector) target could not be found", + ErrorKind::InvalidAbi => "invalid plugin ABI", + ErrorKind::UnsupportedOptionalFeature => "unsupported optional feature", + + ErrorKind::ProcessNotFound => "process not found", + ErrorKind::InvalidProcessInfo => "invalid process info", + ErrorKind::ModuleNotFound => "module not found", + ErrorKind::ExportNotFound => "export not found", + ErrorKind::ImportNotFound => "import not found", + ErrorKind::SectionNotFound => "section not found", + + ErrorKind::Unknown => "unknown error", + } } } /// Specialized `PartialError` type for recoverable memflow errors. -#[derive(Copy, Clone, Eq, PartialEq, Hash)] +#[derive(Clone, Eq, PartialEq, Hash)] pub enum PartialError { /// Hard Error /// - /// Catch-all for all hard errors + /// Catch-all for all hard Error(Error), /// Partial Virtual Read Error /// @@ -131,7 +313,7 @@ pub enum PartialError { /// /// Error when a write from virtual memory only completed partially. /// This can usually happen when trying to read a page that is currently paged out. 
- PartialVirtualWrite, + PartialVirtualWrite(T), } /// Convert from Error @@ -142,18 +324,38 @@ impl From for PartialError { } impl PartialError { - /// Returns a tuple representing the error description and its string value. - pub fn to_str_pair(&self) -> (&'static str, Option<&'static str>) { + /// Returns a static string representing the type of error. + pub fn as_str(&self) -> &'static str { + match self { + PartialError::Error(e) => e.as_str(), + PartialError::PartialVirtualRead(_) => "partial virtual read", + PartialError::PartialVirtualWrite(_) => "partial virtual write", + } + } + + /// Returns a static string representing the type of error. + pub fn into_str(self) -> &'static str { + self.as_str() + } +} + +impl IntError for PartialError<()> { + fn into_int_err(self) -> NonZeroI32 { match self { - PartialError::Error(e) => ("other error", Some(e.to_str_pair().0)), - PartialError::PartialVirtualRead(_) => ("partial virtual read error", None), - PartialError::PartialVirtualWrite => ("partial virtual write error", None), + PartialError::Error(err) => err.into_int_err(), + PartialError::PartialVirtualRead(_) => NonZeroI32::new(-2).unwrap(), + PartialError::PartialVirtualWrite(_) => NonZeroI32::new(-3).unwrap(), } } - /// Returns a simple string representation of the error. 
- pub fn to_str(&self) -> &'static str { - self.to_str_pair().0 + fn from_int_err(err: NonZeroI32) -> Self { + let errc = (-err.get()) & 0xFi32; + match errc { + 1 => PartialError::Error(Error::from_int_err(err)), + 2 => PartialError::PartialVirtualRead(()), + 3 => PartialError::PartialVirtualWrite(()), + _ => PartialError::Error(Error(ErrorOrigin::Ffi, ErrorKind::Unknown)), + } } } @@ -167,12 +369,9 @@ impl fmt::Debug for PartialError { impl fmt::Display for PartialError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (desc, value) = self.to_str_pair(); - - if let Some(value) = value { - write!(f, "{}: {}", desc, value) - } else { - f.write_str(desc) + match self { + PartialError::Error(e) => f.write_str(e.as_str()), + _ => f.write_str(self.as_str()), } } } @@ -180,7 +379,7 @@ impl fmt::Display for PartialError { #[cfg(feature = "std")] impl error::Error for PartialError { fn description(&self) -> &str { - self.to_str() + self.as_str() } } @@ -190,7 +389,7 @@ pub type Result = result::Result; /// Specialized `PartialResult` type for memflow results with recoverable errors. pub type PartialResult = result::Result>; -/// Specialized `PartialResult` exntesion for results. +/// Specialized `PartialResult` extension for results. pub trait PartialResultExt { /// Tries to extract the data from the `Result`. /// This will return a full error even if a partial error happened. 
@@ -211,7 +410,7 @@ impl PartialResultExt for PartialResult { fn data(self) -> Result { match self { Ok(data) => Ok(data), - Err(_) => Err(Error::Partial), + Err(_) => Err(Error(ErrorOrigin::Memory, ErrorKind::PartialData)), } } @@ -219,8 +418,8 @@ impl PartialResultExt for PartialResult { match self { Ok(data) => Ok(data), Err(PartialError::PartialVirtualRead(data)) => Ok(data), - //Err(Error::PartialVirtualWrite(data)) => Ok(data), - Err(_) => Err(Error::Partial), + Err(PartialError::PartialVirtualWrite(data)) => Ok(data), + Err(PartialError::Error(e)) => Err(e), } } @@ -229,7 +428,142 @@ impl PartialResultExt for PartialResult { Ok(data) => Ok(func(data)), Err(PartialError::Error(e)) => Err(PartialError::Error(e)), Err(PartialError::PartialVirtualRead(data)) => Ok(func(data)), - Err(PartialError::PartialVirtualWrite) => Err(PartialError::PartialVirtualWrite), + Err(PartialError::PartialVirtualWrite(data)) => Ok(func(data)), } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::cglue::result::{ + from_int_result, from_int_result_empty, into_int_out_result, into_int_result, IntError, + }; + use std::mem::MaybeUninit; + use std::num::NonZeroI32; + + #[test] + pub fn error_from_i32_invalid() { + let mut err = Error::from_int_err(NonZeroI32::new(std::i32::MIN + 1).unwrap()); + assert_eq!(err.0, ErrorOrigin::Other); + assert_eq!(err.1, ErrorKind::Unknown); + + err = Error::from_int_err(NonZeroI32::new(-1).unwrap()); + assert_eq!(err.0, ErrorOrigin::Other); + assert_eq!(err.1, ErrorKind::Unknown); + + err = Error::from_int_err(NonZeroI32::new(-2).unwrap()); + assert_eq!(err.0, ErrorOrigin::Other); + assert_eq!(err.1, ErrorKind::Unknown); + + err = Error::from_int_err(NonZeroI32::new(-3).unwrap()); + assert_eq!(err.0, ErrorOrigin::Other); + assert_eq!(err.1, ErrorKind::Unknown); + } + + #[test] + pub fn part_error_from_i32_invalid() { + let mut result: PartialResult<()> = from_int_result_empty(-1); + assert!(result.is_err()); + assert_eq!( + 
result.err().unwrap(), + PartialError::Error(Error(ErrorOrigin::Other, ErrorKind::Unknown)) + ); + + result = from_int_result_empty(-2); + assert!(result.is_err()); + assert_eq!(result.err().unwrap(), PartialError::PartialVirtualRead(())); + + result = from_int_result_empty(-3); + assert!(result.is_err()); + assert_eq!(result.err().unwrap(), PartialError::PartialVirtualWrite(())); + + result = from_int_result_empty(-4); + assert!(result.is_err()); + assert_eq!( + result.err().unwrap(), + PartialError::Error(Error(ErrorOrigin::Ffi, ErrorKind::Unknown)) + ); + } + + #[test] + pub fn error_to_from_i32() { + let err = Error::from_int_err( + Error(ErrorOrigin::Other, ErrorKind::InvalidExeFile).into_int_err(), + ); + assert_eq!(err.0, ErrorOrigin::Other); + assert_eq!(err.1, ErrorKind::InvalidExeFile); + } + + #[test] + pub fn result_ok_void_ffi() { + let r: Result<()> = Ok(()); + let result: Result<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_ok()); + } + + #[test] + pub fn result_ok_value_ffi() { + let r: Result = Ok(1234i32); + let mut out = MaybeUninit::::uninit(); + let result: Result = unsafe { from_int_result(into_int_out_result(r, &mut out), out) }; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 1234i32); + } + + #[test] + pub fn result_error_void_ffi() { + let r: Result = Err(Error(ErrorOrigin::Other, ErrorKind::InvalidExeFile)); + let result: Result<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().0, ErrorOrigin::Other); + assert_eq!(result.err().unwrap().1, ErrorKind::InvalidExeFile); + } + + #[test] + pub fn result_error_value_ffi() { + let r: Result = Err(Error(ErrorOrigin::Other, ErrorKind::InvalidExeFile)); + let mut out = MaybeUninit::::uninit(); + let result: Result = unsafe { from_int_result(into_int_out_result(r, &mut out), out) }; + assert!(result.is_err()); + assert_eq!(result.err().unwrap().0, ErrorOrigin::Other); + assert_eq!(result.err().unwrap().1, 
ErrorKind::InvalidExeFile); + } + + #[test] + pub fn part_result_ok_void_ffi() { + let r: PartialResult<()> = Ok(()); + let result: PartialResult<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_ok()); + } + + #[test] + pub fn part_result_error_void_ffi() { + let r: PartialResult<()> = Err(PartialError::Error(Error( + ErrorOrigin::Other, + ErrorKind::InvalidExeFile, + ))); + let result: PartialResult<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_err()); + assert_eq!( + result.err().unwrap(), + PartialError::Error(Error(ErrorOrigin::Other, ErrorKind::InvalidExeFile)) + ); + } + + #[test] + pub fn part_result_part_error_read_ffi() { + let r: PartialResult<()> = Err(PartialError::PartialVirtualRead(())); + let result: PartialResult<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_err()); + assert_eq!(result.err().unwrap(), PartialError::PartialVirtualRead(())); + } + + #[test] + pub fn part_result_part_error_write_ffi() { + let r: PartialResult<()> = Err(PartialError::PartialVirtualWrite(())); + let result: PartialResult<()> = from_int_result_empty(into_int_result(r)); + assert!(result.is_err()); + assert_eq!(result.err().unwrap(), PartialError::PartialVirtualWrite(())); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/iter/double_buffered_iterator.rs b/apex_dma/memflow_lib/memflow/src/iter/double_buffered_iterator.rs index dc9f2e6..0925736 100644 --- a/apex_dma/memflow_lib/memflow/src/iter/double_buffered_iterator.rs +++ b/apex_dma/memflow_lib/memflow/src/iter/double_buffered_iterator.rs @@ -39,7 +39,7 @@ where fn next(&mut self) -> Option { //If empty, buffer up the output deque if self.buf_out.is_empty() { - while let Some(elem) = self.iter.next() { + for elem in self.iter.by_ref() { match (self.fi)(elem) { (true, elem) => { self.buf.push_back(elem); diff --git a/apex_dma/memflow_lib/memflow/src/iter/mod.rs b/apex_dma/memflow_lib/memflow/src/iter/mod.rs index 7da51a5..c676e7a 100644 --- 
a/apex_dma/memflow_lib/memflow/src/iter/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/iter/mod.rs @@ -3,7 +3,7 @@ Special purpose iterators for memflow. */ mod page_chunks; -use crate::types::Address; +use crate::types::{umem, Address}; pub use page_chunks::*; mod double_buffered_iterator; @@ -12,9 +12,6 @@ use double_buffered_iterator::*; mod doublepeek; pub use doublepeek::*; -mod void; -pub use void::FnExtend; - pub trait FlowIters: Iterator { /// Split an iterator to chunks, process them, and produce another iterator back /// @@ -79,7 +76,7 @@ pub trait PageChunks { /// # Examples /// /// ``` - /// use memflow::iter::PageChunks; + /// use memflow::prelude::{PageChunks, umem}; /// /// // Misaligned buffer length /// let buffer = vec![0; 0x1492]; @@ -96,7 +93,6 @@ pub trait PageChunks { /// println!("{}", page_count); /// /// ``` - fn page_chunks( self, start_address: Address, @@ -105,7 +101,18 @@ pub trait PageChunks { where Self: SplitAtIndex + Sized, { - PageChunkIterator::new(self, start_address, page_size, |_, _, _| true) + self.mem_chunks(start_address, page_size as umem) + } + + fn mem_chunks( + self, + start_address: Address, + mem_size: umem, + ) -> PageChunkIterator> + where + Self: SplitAtIndex + Sized, + { + PageChunkIterator::new(self, start_address, mem_size, |_, _, _| true) } /// Craete a page aligned chunk iterator with configurable splitting @@ -128,7 +135,7 @@ pub trait PageChunks { /// # Examples /// /// ``` - /// use memflow::iter::PageChunks; + /// use memflow::prelude::{PageChunks, umem}; /// /// let buffer = vec![0; 0x10000]; /// const PAGE_SIZE: usize = 0x100; @@ -142,8 +149,8 @@ pub trait PageChunks { /// // The rest - kept as is, linear. 
/// let chunk_count = buffer /// .page_chunks_by(0.into(), PAGE_SIZE, |addr, cur_split, _| { - /// ((addr.as_usize() / PAGE_SIZE) % PFN_MAGIC) == 0 - /// || (((addr + cur_split.len()).as_usize() / PAGE_SIZE) % PFN_MAGIC) == 0 + /// ((addr.to_umem() as usize / PAGE_SIZE) % PFN_MAGIC) == 0 + /// || (((addr + cur_split.len()).to_umem() as usize / PAGE_SIZE) % PFN_MAGIC) == 0 /// }) /// .count(); /// @@ -163,7 +170,19 @@ pub trait PageChunks { where Self: SplitAtIndex + Sized, { - PageChunkIterator::new(self, start_address, page_size, split_fn) + self.mem_chunks_by(start_address, page_size as umem, split_fn) + } + + fn mem_chunks_by) -> bool>( + self, + start_address: Address, + mem_size: umem, + split_fn: F, + ) -> PageChunkIterator + where + Self: SplitAtIndex + Sized, + { + PageChunkIterator::new(self, start_address, mem_size, split_fn) } } @@ -172,6 +191,7 @@ impl PageChunks for T where T: SplitAtIndex {} #[cfg(test)] mod tests { use crate::iter::PageChunks; + use crate::types::Address; const PAGE_SIZE: usize = 97; const OFF: usize = 26; @@ -219,16 +239,16 @@ mod tests { fn pc_check_all_aligned_zero() { let arr = [0_u8; 0x1000]; - for (addr, _chunk) in arr.page_chunks(0.into(), PAGE_SIZE) { + for (addr, _chunk) in arr.page_chunks(Address::null(), PAGE_SIZE) { assert_eq!(addr.as_page_aligned(PAGE_SIZE), addr); } } #[test] fn pc_check_all_chunks_equal() { - let arr = [0_u8; 100 * PAGE_SIZE]; + let arr = [0_u8; (100 * PAGE_SIZE)]; - for (_addr, chunk) in arr.page_chunks(0.into(), PAGE_SIZE) { + for (_addr, chunk) in arr.page_chunks(Address::null(), PAGE_SIZE) { println!("{:x} {:x}", _addr, chunk.len()); assert_eq!(chunk.len(), PAGE_SIZE); } @@ -237,7 +257,7 @@ mod tests { #[test] fn pc_check_all_chunks_equal_first_not() { const OFF: usize = 26; - let arr = [0_u8; 100 * PAGE_SIZE + (PAGE_SIZE - OFF)]; + let arr = [0_u8; (100 * PAGE_SIZE + (PAGE_SIZE - OFF)) as usize]; let mut page_iter = arr.page_chunks(OFF.into(), PAGE_SIZE); @@ -255,7 +275,7 @@ mod tests { #[test] fn 
pc_check_everything() { const TOTAL_LEN: usize = 100 * PAGE_SIZE + ADDEND - OFF; - let arr = [0_u8; TOTAL_LEN]; + let arr = [0_u8; TOTAL_LEN as usize]; let mut cur_len = 0; let mut prev_len = 0; @@ -284,9 +304,9 @@ mod tests { #[test] fn pc_check_size_hint() { const PAGE_COUNT: usize = 5; - let arr = [0_u8; PAGE_SIZE * PAGE_COUNT]; + let arr = [0_u8; (PAGE_SIZE as usize * PAGE_COUNT)]; assert_eq!( - arr.page_chunks(0.into(), PAGE_SIZE).size_hint().0, + arr.page_chunks(Address::null(), PAGE_SIZE).size_hint().0, PAGE_COUNT ); assert_eq!( @@ -304,4 +324,10 @@ mod tests { PAGE_COUNT ); } + + #[test] + fn pc_check_empty() { + let arr = [0_u8; 0]; + let _ = arr.page_chunks(Address::null(), PAGE_SIZE).next(); + } } diff --git a/apex_dma/memflow_lib/memflow/src/iter/page_chunks.rs b/apex_dma/memflow_lib/memflow/src/iter/page_chunks.rs index d247673..3df29a3 100644 --- a/apex_dma/memflow_lib/memflow/src/iter/page_chunks.rs +++ b/apex_dma/memflow_lib/memflow/src/iter/page_chunks.rs @@ -1,75 +1,141 @@ -use crate::types::Address; +use crate::cglue::{CSliceMut, CSliceRef, CTup2, CTup3}; +use crate::types::{clamp_to_usize, imem, umem, Address}; +use core::convert::TryInto; use std::iter::*; -/// This trait indicates that it is safe to not have to call unsplit for the object -/// -/// Some objects implementing `SplitAtIndex` may only do so by mutating its internal state, however, -/// if it is possible to do without doing so, implement this trait as well to allow structures that -/// use splittable objects, but may not call unsplit afterwards use your type genericly. -pub trait SplitAtIndexNoMutation: SplitAtIndex {} - pub trait SplitAtIndex { - fn split_at(&mut self, idx: usize) -> (Self, Option) + /// Split data at a given index + /// + /// This method will split the underlying data at a given index into up to 2 possible values. + /// + /// What a split means very much depends on the underlying type. 
sizes are split literally, + /// into 2 sizes, one being up to idx, the other being what's left over. Slices are split into + /// subslices. (Address, impl SplitAtIndex) pairs are split very much like slices (with Address + /// describing the starting address of the data, and the second element being pretty much + /// anything). + /// + /// But the core idea is - to allow splittable data, be split, in a generic way. + fn split_at(self, idx: umem) -> (Option, Option) + where + Self: Sized; + + /// Split data using mutable reference + /// + /// This should behave the same as split_at, but work with mutable ref being input, instead of + /// the actual value being consumed. This is useful when splitting slices and needing to + /// unsplit them. + /// + /// # Safety + /// + /// Mutating self reference and returned values after the split is undefined behaviour, + /// because both self, and returned values can point to the same mutable region + /// (for example: &mut [u8]) + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) where Self: Sized; - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) + /// Inclusive version of `split_at` + /// + /// This is effectively split_at(idx + 1), with a safeguard for idx == usize::MAX. + fn split_inclusive_at(self, idx: umem) -> (Option, Option) where Self: Sized, { - if idx == core::usize::MAX && self.length() != 0 { - //This is a pretty sketchy implementation, but it will be correct when overflows are a problem. - let (_, right) = self.split_at(0); - (right.unwrap(), None) + if idx == umem::MAX { + (Some(self), None) } else { self.split_at(idx + 1) } } - fn split_at_rev(&mut self, idx: usize) -> (Option, Self) + /// Inclusive version of `split_at_mut` + /// + /// This is effectively split_at_mut(idx + 1), with a safeguard for idx == usize::MAX. + /// + /// # Safety + /// + /// The same safety rules apply as with `split_at_mut`. 
Mutating the value after the function + /// call is undefined, and should not be done until returned values are dropped. + unsafe fn split_inclusive_at_mut(&mut self, idx: umem) -> (Option, Option) where Self: Sized, { - let (left, right) = self.split_inclusive_at(self.length() - idx); - ( - if left.length() == 0 { None } else { Some(left) }, - right.unwrap(), - ) + if idx == umem::MAX { + let (_, right) = self.split_at_mut(0); + (right, None) + } else { + self.split_at_mut(idx + 1) + } } - fn unsplit(&mut self, _left: Self, _right: Option) + /// Reverse version of `split_at` + /// + /// This will perform splits with index offsetting from the end of the data + fn split_at_rev(self, idx: umem) -> (Option, Option) where Self: Sized, { + if let Some(idx) = self.length().checked_sub(idx) { + self.split_inclusive_at(idx) + } else { + (None, Some(self)) + } } - fn length(&self) -> usize; + /// Returns the length of the data + /// + /// This is the length in terms of how many indexes can be used to split the data. + fn length(&self) -> umem; + /// Returns an allocation size hint for the data + /// + /// This is purely a hint, but not really an exact value of how much data needs allocating. 
fn size_hint(&self) -> usize { - self.length() + clamp_to_usize(self.length()) } } -impl SplitAtIndexNoMutation for usize {} - +#[cfg(any(feature = "64_bit_mem", feature = "128_bit_mem"))] impl SplitAtIndex for usize { - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) { - if *self == 0 || *self - 1 <= idx { - (*self, None) + fn split_at(self, idx: umem) -> (Option, Option) { + if idx == 0 { + (None, Some(self)) + } else if self as umem <= idx { + (Some(self), None) } else { - (idx + 1, Some(*self - idx - 1)) + (Some(idx as usize), Some(self - idx as usize)) } } - fn split_at(&mut self, idx: usize) -> (Self, Option) { - if (*self as usize) <= idx { - (*self, None) + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + (*self).split_at(idx) + } + + fn length(&self) -> umem { + *self as umem + } + + fn size_hint(&self) -> usize { + 1 + } +} + +impl SplitAtIndex for umem { + fn split_at(self, idx: umem) -> (Option, Option) { + if idx == 0 { + (None, Some(self)) + } else if self <= idx { + (Some(self), None) } else { - (idx, Some(*self - idx)) + (Some(idx as umem), Some(self - idx)) } } - fn length(&self) -> usize { + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + (*self).split_at(idx) + } + + fn length(&self) -> umem { *self } @@ -78,32 +144,30 @@ impl SplitAtIndex for usize { } } -impl SplitAtIndexNoMutation for (Address, T) {} - impl SplitAtIndex for (Address, T) { - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) { - let (left, right) = self.1.split_inclusive_at(idx); + fn split_at(self, idx: umem) -> (Option, Option) { + let (left, right) = self.1.split_at(idx); - if let Some(right) = right { + if let Some(left) = left { let left_len = left.length(); - ((self.0, left), Some((self.0 + left_len, right))) + (Some((self.0, left)), Some(self.0 + left_len).zip(right)) } else { - ((self.0, left), None) + (None, Some(self.0).zip(right)) } } - fn split_at(&mut self, idx: usize) -> (Self, Option) { - let 
(left, right) = self.1.split_at(idx); + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let (left, right) = self.1.split_at_mut(idx); - if let Some(right) = right { + if let Some(left) = left { let left_len = left.length(); - ((self.0, left), Some((self.0 + left_len, right))) + (Some((self.0, left)), Some(self.0 + left_len).zip(right)) } else { - ((self.0, left), None) + (None, Some(self.0).zip(right)) } } - fn length(&self) -> usize { + fn length(&self) -> umem { self.1.length() } @@ -112,71 +176,251 @@ impl SplitAtIndex for (Address, T) { } } -impl SplitAtIndexNoMutation for &[T] {} - impl SplitAtIndex for &[T] { - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) { - let mid = core::cmp::min(self.len(), core::cmp::min(self.len(), idx) + 1); - let (left, right) = (*self).split_at(mid); - (left, if right.is_empty() { None } else { Some(right) }) + fn split_at(self, idx: umem) -> (Option, Option) { + let (left, right) = (*self).split_at(core::cmp::min(self.len(), clamp_to_usize(idx))); + ( + if left.is_empty() { None } else { Some(left) }, + if right.is_empty() { None } else { Some(right) }, + ) } - fn split_at(&mut self, idx: usize) -> (Self, Option) { - let (left, right) = (*self).split_at(core::cmp::min(self.len(), idx)); - (left, if right.is_empty() { None } else { Some(right) }) + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let (left, right) = (*self).split_at(core::cmp::min(self.len(), clamp_to_usize(idx))); + ( + if left.is_empty() { None } else { Some(left) }, + if right.is_empty() { None } else { Some(right) }, + ) } - fn length(&self) -> usize { - self.len() + fn length(&self) -> umem { + self.len() as umem } } -impl SplitAtIndexNoMutation for &mut [T] {} - impl SplitAtIndex for &mut [T] { - fn split_inclusive_at(&mut self, idx: usize) -> (Self, Option) { - let mid = core::cmp::min(self.len(), core::cmp::min(self.len(), idx) + 1); + fn split_at(self, idx: umem) -> (Option, Option) { + let (left, 
right) = (*self).split_at_mut(core::cmp::min(self.len(), clamp_to_usize(idx))); + ( + if left.is_empty() { None } else { Some(left) }, + if right.is_empty() { None } else { Some(right) }, + ) + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let mid = core::cmp::min(self.len(), clamp_to_usize(idx)); let ptr = self.as_mut_ptr(); ( - unsafe { core::slice::from_raw_parts_mut(ptr, mid) }, + if mid != 0 { + Some(core::slice::from_raw_parts_mut(ptr, mid)) + } else { + None + }, + if mid != self.len() { + Some(core::slice::from_raw_parts_mut( + ptr.add(mid), + self.len() - mid, + )) + } else { + None + }, + ) + } + + fn length(&self) -> umem { + self.len() as umem + } +} + +impl<'a, T> SplitAtIndex for CSliceRef<'a, T> { + fn split_at(self, idx: umem) -> (Option, Option) { + let sliced = unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) }; + let (left, right) = (*sliced).split_at(core::cmp::min(self.len(), clamp_to_usize(idx))); + ( + if left.is_empty() { + None + } else { + Some(left.into()) + }, + if right.is_empty() { + None + } else { + Some(right.into()) + }, + ) + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let mid = core::cmp::min(self.len(), clamp_to_usize(idx)); + let ptr = self.as_ptr(); + ( + if mid != 0 { + Some(core::slice::from_raw_parts(ptr, mid).into()) + } else { + None + }, if mid != self.len() { - Some(unsafe { core::slice::from_raw_parts_mut(ptr.add(mid), self.len() - mid) }) + Some(core::slice::from_raw_parts(ptr.add(mid), self.len() - mid).into()) } else { None }, ) } - fn split_at(&mut self, idx: usize) -> (Self, Option) { - let mid = core::cmp::min(self.len(), idx); + fn length(&self) -> umem { + self.len() as umem + } +} + +impl<'a, T> SplitAtIndex for CSliceMut<'a, T> { + fn split_at(self, idx: umem) -> (Option, Option) { + let sliced = unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }; + let (left, right) = 
(*sliced).split_at_mut(core::cmp::min(self.len(), clamp_to_usize(idx))); + ( + if left.is_empty() { + None + } else { + Some(left.into()) + }, + if right.is_empty() { + None + } else { + Some(right.into()) + }, + ) + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let mid = core::cmp::min(self.len(), clamp_to_usize(idx)); let ptr = self.as_mut_ptr(); ( - unsafe { core::slice::from_raw_parts_mut(ptr, mid) }, + if mid != 0 { + Some(core::slice::from_raw_parts_mut(ptr, mid).into()) + } else { + None + }, if mid != self.len() { - Some(unsafe { core::slice::from_raw_parts_mut(ptr.add(mid), self.len() - mid) }) + Some(core::slice::from_raw_parts_mut(ptr.add(mid), self.len() - mid).into()) } else { None }, ) } - fn length(&self) -> usize { - self.len() + fn length(&self) -> umem { + self.len() as umem + } +} + +impl SplitAtIndex for CTup2 { + fn split_at(self, idx: umem) -> (Option, Option) { + let (left, right) = self.1.split_at(idx); + + if let Some(left) = left { + let left_len = left.length(); + ( + Some(CTup2(self.0, left)), + Some(self.0 + left_len).zip(right).map(<_>::into), + ) + } else { + (None, Some(self.0).zip(right).map(<_>::into)) + } + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let (left, right) = self.1.split_at_mut(idx); + + if let Some(left) = left { + let left_len = left.length(); + ( + Some(CTup2(self.0, left)), + Some(self.0 + left_len).zip(right).map(<_>::into), + ) + } else { + (None, Some(self.0).zip(right).map(<_>::into)) + } + } + + fn length(&self) -> umem { + self.1.length() + } + + fn size_hint(&self) -> usize { + self.1.size_hint() + } +} +impl SplitAtIndex for CTup3 { + fn split_at(self, idx: umem) -> (Option, Option) { + let (left, right) = self.2.split_at(idx); + + let meta = self.1; + + if let Some(left) = left { + let left_len = left.length(); + ( + Some(CTup3(self.0, meta, left)), + Some(self.0 + left_len) + .zip(right) + .map(|(a, b)| (a, meta + left_len, b)) + .map(<_>::into), 
+ ) + } else { + ( + None, + Some(self.0) + .zip(right) + .map(|(a, b)| (a, meta, b)) + .map(<_>::into), + ) + } + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) { + let (left, right) = self.2.split_at_mut(idx); + + let meta = self.1; + + if let Some(left) = left { + let left_len = left.length(); + ( + Some(CTup3(self.0, meta, left)), + Some(self.0 + left_len) + .zip(right) + .map(|(a, b)| (a, meta + left_len, b)) + .map(<_>::into), + ) + } else { + ( + None, + Some(self.0) + .zip(right) + .map(|(a, b)| (a, meta, b)) + .map(<_>::into), + ) + } + } + + fn length(&self) -> umem { + self.2.length() + } + + fn size_hint(&self) -> usize { + self.2.size_hint() } } pub struct PageChunkIterator { v: Option, cur_address: Address, - page_size: usize, + page_size: umem, check_split_fn: FS, - cur_off: usize, + cur_off: umem, } impl PageChunkIterator { - pub fn new(buf: T, start_address: Address, page_size: usize, check_split_fn: FS) -> Self { + pub fn new(buf: T, start_address: Address, page_size: umem, check_split_fn: FS) -> Self { Self { - v: Some(buf), + v: if buf.length() == 0 { None } else { Some(buf) }, cur_address: start_address, page_size, check_split_fn, @@ -192,30 +436,30 @@ impl) -> bool> Iterator #[inline] fn next(&mut self) -> Option { - let v = core::mem::replace(&mut self.v, None); + let v = self.v.take(); if let Some(mut buf) = v { loop { let end_len = Address::from( self.cur_address - .as_u64() - .wrapping_add(self.page_size as u64), + .to_umem() + .wrapping_add(self.page_size as umem), ) - .as_page_aligned(self.page_size) - .as_usize() - .wrapping_sub(self.cur_address.as_usize()) + .as_mem_aligned(self.page_size) + .to_umem() + .wrapping_sub(self.cur_address.to_umem()) .wrapping_sub(1) .wrapping_add(self.cur_off); - let (head, tail) = buf.split_inclusive_at(end_len); + let (head, tail) = unsafe { buf.split_inclusive_at_mut(end_len) }; + let head = head.unwrap(); if tail.is_some() && !(self.check_split_fn)(self.cur_address, &head, 
tail.as_ref()) { self.cur_off = end_len + 1; - buf.unsplit(head, tail); } else { self.v = tail; let next_address = - Address::from(self.cur_address.as_usize().wrapping_add(end_len + 1)); + Address::from(self.cur_address.to_umem().wrapping_add(end_len + 1)); let ret = Some((self.cur_address, head)); self.cur_address = next_address; self.cur_off = 0; @@ -230,10 +474,13 @@ impl) -> bool> Iterator #[inline] fn size_hint(&self) -> (usize, Option) { if let Some(buf) = &self.v { - let n = ((self.cur_address + buf.size_hint() - 1).as_page_aligned(self.page_size) - - self.cur_address.as_page_aligned(self.page_size)) - / self.page_size - + 1; + let n: usize = (((self.cur_address + buf.size_hint() - 1_usize) + .as_mem_aligned(self.page_size) + - self.cur_address.as_mem_aligned(self.page_size)) + / self.page_size as imem + + 1) + .try_into() + .unwrap(); (n, Some(n)) } else { (0, Some(0)) diff --git a/apex_dma/memflow_lib/memflow/src/iter/void.rs b/apex_dma/memflow_lib/memflow/src/iter/void.rs deleted file mode 100644 index 43e76b4..0000000 --- a/apex_dma/memflow_lib/memflow/src/iter/void.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::prelude::v1::*; - -use std::marker::PhantomData; - -pub struct FnExtend { - func: F, - _phantom: PhantomData, -} - -impl FnExtend { - pub fn new(func: F) -> Self { - Self { - func, - _phantom: PhantomData::default(), - } - } -} - -impl FnExtend { - pub fn void() -> Self { - Self { - func: |_| {}, - _phantom: PhantomData::default(), - } - } -} - -impl Extend for FnExtend { - fn extend>(&mut self, iter: I) { - iter.into_iter().for_each(&mut self.func); - } -} diff --git a/apex_dma/memflow_lib/memflow/src/lib.rs b/apex_dma/memflow_lib/memflow/src/lib.rs index 437a638..1cfaad7 100644 --- a/apex_dma/memflow_lib/memflow/src/lib.rs +++ b/apex_dma/memflow_lib/memflow/src/lib.rs @@ -1,27 +1,227 @@ -/*! -memflow is a library that allows live memory introspection of running systems and their snapshots. 
-Due to its modular approach it trivial to support almost any scenario where Direct Memory Access is available. +//! +//! # memflow +//! +//! Machine introspection made easy +//! +//! ## Introduction +//! +//! memflow is a library that enables introspection of various machines (hardware, virtual machines, +//! memory dumps) in a generic fashion. There are 2 primary types of objects in memflow - _Connectors_ +//! and _OS layers_. Connector provides raw access to physical memory of a machine. Meanwhile, OS +//! layer builds a higher level abstraction over running operating system, providing access to running +//! processes, input events, etc. These objects are incredibly flexible as they can be chained together +//! to gain access to a process running multiple levels of virtualization deep (see figure below). +//! +//! ```text +//! +-----------+ +-----------+ +//! | native OS | | leechcore | +//! +-+---------+ +-+---------+ +//! | | +//! | +-----------+ | +----------+ +//! +->| QEMU VM | +->| Win32 OS | +//! +-+---------+ +-+--------+ +//! | | +//! | +----------+ | +-----------+ +//! +->| Win32 OS | +->| lsass.exe | +//! +-+--------+ +-----------+ +//! | +//! | +-----------+ +//! +->| Hyper-V | +//! +-+---------+ +//! | +//! | +----------+ +//! +->| Linux OS | +//! +-+--------+ +//! | +//! | +-----------+ +//! +->| SSHD Proc | +//! +-----------+ +//! +//! (Example chains of access. For illustrative purposes only - Hyper-V Connector and Linux OS are not yet available) +//! ``` +//! +//! As a library user, you do not have to worry about delicacies of chaining - everything is provided, +//! batteries included. See one of our [examples](memflow/examples/process_list.rs) on how simple it is to +//! build a chain (excluding parsing). All Connectors and OS layers are dynamically loadable with common +//! interface binding them. +//! +//! All of this flexibility is provided with very robust and efficient backend - memory interface is +//! 
batchable and divisible, which gets taken advantage of by our throughput optimized virtual address +//! translation pipeline that is able to walk the entire process virtual address space in under a second. +//! Connectors and OS layers can be composed with the vast library of generic caching mechanisms, utility +//! functions and data structures. +//! +//! The memflow ecosystem is not bound to just Rust - Connector and OS layer functions are linked together +//! using C ABI, thus users can write code that interfaces with them in other languages, such as C, C++, Zig, +//! etc. In addition, these plugins can too be implemented in foreign languages - everything is open. +//! +//! Overall, memflow is the most robust, efficient and flexible solution out there for machine introspection. +//! +//! # Structure +//! +//! memflow is separated into modules that are concerned with different parts of the ecosystem. +//! [mem](crate::mem) module is concerned with memory interfacing, [os](crate::os) module is +//! conerned with OS abstractions, [architecture](crate::architecture) module defines +//! specification of a computer architecture, as well as several built-in architectures, +//! [types](crate::types) concerns itself with data types used throughout memflow, while +//! [plugins](crate::plugins) module defines the dynamically loadable plugin types. +//! +//! ## Getting started +//! +//! To quickly get started with the memflow library, simply include its prelude: +//! +//! ``` +//! use memflow::prelude::v1::*; +//! ``` +//! +//! Afterwards, you will want to build a memflow object using the plugin inventory: +//! +//! ``` +//! # use memflow::prelude::v1::*; +//! # fn main() -> Result<()> { +//! let inventory = Inventory::scan(); +//! # let inventory = inventory.with_workspace()?; +//! +//! let conn = inventory.create_connector("dummy", None, None)?; +//! # Ok(()) +//! # } +//! +//! ``` +//! +//! ## Traits +//! +//! 
While Connectors and OS layers are the primary user facing objects, functionality of these +//! objects is provided through a set of traits. +//! +//! ### Core traits +//! +//! [MemoryView](crate::mem::memory_view::MemoryView) is the primary trait of issuing read and +//! write operations. Required functions are a bit intimidating, because memflow wants IO to be +//! batchable, which enables impressive performance, however, there are several helpers available +//! for performing simple read and write operations. +//! +//! [Os](crate::os::root::Os) and [OsInner](crate::os::root::OsInner) are the traits that deal with +//! higher level OS abstractions. They enable access to [Processes](crate::os::process::Process) +//! together with their MemoryViews. The reason for OsInner existance is lack of GATs, however, +//! this complexity should be removed as soon as the feature is +//! [stabilized](https://github.com/rust-lang/rust/pull/96709). +//! +//! [PhysicalMemory](crate::mem::phys_mem::PhysicalMemory) trait is implemented by connectors. It +//! embeds special metadata which is used by our memory caches, however is not much different from +//! MemoryView. Users performing physical reads may use the +//! [phys_view](crate::mem::phys_mem::PhysicalMemory::phys_view) function to access a view to this +//! physical address space and gain access to the helper methods. +//! +//! [VirtualTranslate](crate::mem::virt_translate::VirtualTranslate) trait is optionally provided +//! by processes in order to translate virtual addresses into physical ones. This is a lower level +//! trait. +//! +//! ### Class diagrams +//! +//! ```text +//! +----------------------------+ +----------------------------+ +//! | | | | +//! | Connector | | OS Layer | +//! | | | | +//! | +------------------------+ | | +------------------------+ | +//! | | PhysicalMemory | | | | OsInner | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! 
| +------------------------+ | | +------------------------+ | +//! | | Clone | | | | Clone | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! | Optional: | | Optional: | +//! | | | | +//! | +------------------------+ | | +------------------------+ | +//! | | ConnectorCpuState | | | | MemoryView | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! +----------------------------+ | +------------------------+ | +//! | | VirtualTranslate | | +//! | +------------------------+ | +//! | | +//! | +------------------------+ | +//! | | PhysicalMemory | | +//! | +------------------------+ | +//! | | +//! | +------------------------+ | +//! | | OsKeyboard | | +//! +----------------------------+ | +------------------------+ | +//! | | +----------------------------+ +//! | IntoProcessInstance | +//! | | +//! | +------------------------+ | +----------------------------+ +//! | | Process | | | | +//! | +------------------------+ | | ProcessInstance | +//! | | | | +//! | +------------------------+ | | +------------------------+ | +//! | | MemoryView | | | | Process | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! | +------------------------+ | | +------------------------+ | +//! | | Clone | | | | MemoryView | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! | Optional: | | Optional: | +//! | | | | +//! | +------------------------+ | | +------------------------+ | +//! | | VirtualTranslate | | | | VirtualTranslate | | +//! | +------------------------+ | | +------------------------+ | +//! | | | | +//! +----------------------------+ +----------------------------+ +//! ``` +//! +//! # Philosophy +//! +//! The core idea of memflow is to generalize where possible, specialize when needed. +//! +//! Best practice of writing memflow functions is to write them generically - use `impl Trait` +//! notation to define the type of object needed. 
This will allow for your code to work on both +//! dynamically loaded plugins, as well as custom, statically linked, and potentially more +//! efficient memory/OS objects. +//! +//! For instance, if you want to perform a memory read, define the function as follows: +//! +//! ``` +//! use memflow::prelude::v1::*; +//! # use memflow::dummy::{DummyMemory, DummyOs}; +//! +//! // Define the function with `impl Trait` notation +//! fn special_read(mem: &mut impl MemoryView) -> Result { +//! mem.read(Address::from(0x42)).data() +//! } +//! +//! // Use it with plugin object +//! let mut inventory = Inventory::scan(); +//! # let mut inventory = inventory.with_workspace().unwrap(); +//! let args = str::parse(":4m").unwrap(); +//! let conn = inventory.create_connector("dummy", None, Some(&args)) +//! .unwrap(); +//! +//! assert!(special_read(&mut conn.into_phys_view()).is_ok()); +//! +//! // Use it with statically built connector +//! let mut mem = DummyMemory::new(size::mb(4)); +//! +//! assert!(special_read(&mut mem.phys_view()).is_ok()); +//! +//! // Use it with statically built process +//! let mut proc = DummyOs::quick_process(size::mb(4), &[]); +//! +//! assert!(special_read(&mut proc).is_ok()); +//! ``` -The very core of the library is a [PhysicalMemory](mem/phys_mem/index.html) that provides direct memory access in an abstract environment. -This object that can be defined both statically, and dynamically with the use of the `inventory` feature. -If `inventory` is enabled, it is possible to dynamically load libraries that provide Direct Memory Access. - -Through the use of OS abstraction layers, like [memflow-win32](https://github.com/memflow/memflow/tree/master/memflow-win32), -user can gain access to virtual memory of individual processes, -by creating objects that implement [VirtualMemory](mem/virt_mem/index.html). 
- -Bridging the two is done by a highly throughput optimized virtual address translation function, -which allows for crazy fast memory transfers at scale. - -The core is architecture independent (as long as addresses fit in 64-bits), and currently both 32, -and 64-bit versions of the x86 family are available to be used. - -For non-rust libraries, it is possible to use the [FFI](https://github.com/memflow/memflow/tree/master/memflow-ffi) -to interface with the library. - -You will almost always import this module when working with memflow. -*/ +//#![warn(missing_docs)] +// due to the fact that umem equals u64 when compiling with a x86_64 target clippy issues false-positives on these conversions. +// targets other than x86_64 still might require those. +#![allow(clippy::unnecessary_cast)] +// this issue is triggered due to an issue in bitmask 1.x +// since upgrading to 2.x broke code generation via cglue-bindgen / cbindgen +// we are allowing this lint temporarily +#![allow(clippy::bad_bit_mask)] +// no-std-compat #![cfg_attr(not(feature = "std"), no_std)] extern crate no_std_compat as std; @@ -42,27 +242,59 @@ pub mod mem; pub mod connector; -pub mod process; +#[cfg(feature = "plugins")] +pub mod plugins; + +pub mod os; pub mod iter; +// forward declare +#[doc(hidden)] pub mod derive { - pub use memflow_derive::*; + pub use ::memflow_derive::*; +} + +#[doc(hidden)] +pub mod cglue { + pub use ::cglue::prelude::v1::*; +} + +#[doc(hidden)] +#[cfg(feature = "abi_stable")] +pub mod abi_stable { + pub use ::abi_stable::*; } +#[doc(hidden)] +pub mod dataview { + pub use ::dataview::*; + pub use ::memflow_derive::Pod; +} + +#[doc(hidden)] +#[cfg(any(feature = "dummy_mem", test))] +pub mod dummy; + +// TODO: modules should be cleanly seperated here and only necessary types should be exported +#[doc(hidden)] +#[allow(ambiguous_glob_reexports)] pub mod prelude { pub mod v1 { pub use crate::architecture::*; + pub use crate::cglue::*; pub use crate::connector::*; + pub use 
crate::dataview::*; pub use crate::derive::*; pub use crate::error::*; pub use crate::iter::*; pub use crate::mem::*; - pub use crate::process::*; + pub use crate::os::*; + #[cfg(feature = "plugins")] + pub use crate::plugins::os::*; + #[cfg(feature = "plugins")] + pub use crate::plugins::*; pub use crate::types::*; } pub use v1::*; } - -#[deprecated] -pub use prelude::v1::*; diff --git a/apex_dma/memflow_lib/memflow/src/mem/dummy.rs b/apex_dma/memflow_lib/memflow/src/mem/dummy.rs deleted file mode 100644 index e8c2b01..0000000 --- a/apex_dma/memflow_lib/memflow/src/mem/dummy.rs +++ /dev/null @@ -1,458 +0,0 @@ -use crate::architecture::x86::x64; -use crate::architecture::{ArchitectureObj, ScopedVirtualTranslate}; -use crate::connector::MappedPhysicalMemory; -use crate::error::Result; -use crate::mem::virt_mem::VirtualDMA; -use crate::mem::{ - MemoryMap, PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadData, PhysicalWriteData, - VirtualMemory, -}; -use crate::process::{OsProcessInfo, OsProcessModuleInfo, PID}; -use crate::types::{size, Address}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, Rng, SeedableRng}; -use rand_xorshift::XorShiftRng; -use std::collections::VecDeque; - -use std::sync::Arc; - -use x86_64::{ - structures::paging, - structures::paging::{ - mapper::{Mapper, MapperAllSizes, OffsetPageTable}, - page::{PageSize, Size1GiB, Size2MiB, Size4KiB}, - page_table::{PageTable, PageTableFlags}, - FrameAllocator, PhysFrame, - }, - PhysAddr, VirtAddr, -}; - -#[derive(Clone, Copy, Debug)] -enum X64PageSize { - P4k = 0, - P2m = 1, - P1g = 2, -} - -impl X64PageSize { - fn to_size(self) -> usize { - match self { - X64PageSize::P4k => size::kb(4), - X64PageSize::P2m => size::mb(2), - X64PageSize::P1g => size::gb(1), - } - } - - fn to_idx(self) -> usize { - match self { - X64PageSize::P4k => 0, - X64PageSize::P2m => 1, - X64PageSize::P1g => 2, - } - } - - fn from_idx(idx: usize) -> Self { - match idx { - 2 => X64PageSize::P1g, - 1 => X64PageSize::P2m, - _ 
=> X64PageSize::P4k, - } - } -} - -#[derive(Clone, Copy, Debug)] -struct PageInfo { - addr: Address, - size: X64PageSize, -} - -impl PageInfo { - fn split_to_size(&self, new_size: X64PageSize) -> Vec { - let mut ret = vec![]; - for o in 0..(self.size.to_size() / new_size.to_size()) { - ret.push(PageInfo { - addr: self.addr + new_size.to_size() * o, - size: new_size, - }); - } - ret - } - - fn split_down(&self) -> Vec { - self.split_to_size(X64PageSize::from_idx(self.size.to_idx() - 1)) - } -} - -pub struct DummyModule { - base: Address, - size: usize, -} - -impl OsProcessModuleInfo for DummyModule { - fn address(&self) -> Address { - Address::INVALID - } - - fn parent_process(&self) -> Address { - Address::INVALID - } - - fn base(&self) -> Address { - self.base - } - - fn size(&self) -> usize { - self.size - } - - fn name(&self) -> String { - String::from("dummy.so") - } -} - -pub struct DummyProcess { - address: Address, - map_size: usize, - pid: PID, - dtb: Address, -} - -impl DummyProcess { - pub fn get_module(&self, min_size: usize) -> DummyModule { - DummyModule { - base: self.address + thread_rng().gen_range(0, self.map_size / 2), - size: (thread_rng().gen_range(min_size, self.map_size) / 2), - } - } - - pub fn translator(&self) -> impl ScopedVirtualTranslate { - x64::new_translator(self.dtb) - } -} - -impl OsProcessInfo for DummyProcess { - fn address(&self) -> Address { - self.address - } - - fn pid(&self) -> PID { - self.pid - } - - fn name(&self) -> String { - String::from("Dummy") - } - - fn sys_arch(&self) -> ArchitectureObj { - x64::ARCH - } - - fn proc_arch(&self) -> ArchitectureObj { - x64::ARCH - } -} - -pub struct DummyMemory { - buf: Arc>, - mem: MappedPhysicalMemory<&'static mut [u8], MemoryMap<&'static mut [u8]>>, - page_list: VecDeque, - pt_pages: Vec, - last_pid: PID, - rng: XorShiftRng, -} - -impl Clone for DummyMemory { - fn clone(&self) -> Self { - let mut map = MemoryMap::new(); - map.push_range( - 0.into(), - self.buf.len().into(), - 
(self.buf.as_ptr() as u64).into(), - ); - - let mem = unsafe { MappedPhysicalMemory::from_addrmap_mut(map) }; - - Self { - buf: self.buf.clone(), - mem, - page_list: VecDeque::new(), - pt_pages: vec![], - last_pid: self.last_pid, - rng: self.rng.clone(), - } - } -} - -impl PhysicalMemory for DummyMemory { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - self.mem.phys_read_raw_list(data) - } - - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - self.mem.phys_write_raw_list(data) - } - - fn metadata(&self) -> PhysicalMemoryMetadata { - self.mem.metadata() - } -} - -unsafe impl FrameAllocator for DummyMemory -where - S: PageSize, -{ - fn allocate_frame(&mut self) -> Option> { - let new_page = self.alloc_pt_page(); - match PhysFrame::from_start_address(PhysAddr::new(new_page.addr.as_u64())) { - Ok(s) => Some(s), - _ => None, - } - } -} - -impl DummyMemory { - pub fn new_and_dtb(size: usize, virt_size: usize, buffer: &[u8]) -> (Self, Address, Address) { - let mut ret = Self::new(size); - let (dtb, virt_base) = ret.alloc_dtb(virt_size, buffer); - (ret, dtb, virt_base) - } - - pub fn new_virt(size: usize, virt_size: usize, buffer: &[u8]) -> (impl VirtualMemory, Address) { - let (ret, dtb, virt_base) = Self::new_and_dtb(size, virt_size, buffer); - let virt = VirtualDMA::new(ret, x64::ARCH, x64::new_translator(dtb)); - (virt, virt_base) - } - - pub fn new(size: usize) -> Self { - Self::with_rng(size, SeedableRng::from_rng(thread_rng()).unwrap()) - } - - pub fn with_seed(size: usize, seed: u64) -> Self { - Self::with_rng(size, SeedableRng::seed_from_u64(seed)) - } - - pub fn with_rng(size: usize, mut rng: XorShiftRng) -> Self { - let buf = Arc::new(vec![0_u8; size].into_boxed_slice()); - - let mut page_prelist = vec![]; - - let mut i = Address::from(0); - let size_addr = Address::from(size); - - while i < size_addr { - if let Some(page_info) = { - if size_addr - i >= X64PageSize::P1g.to_size() { - Some(PageInfo 
{ - addr: i, - size: X64PageSize::P1g, - }) - } else if size_addr - i >= X64PageSize::P2m.to_size() { - Some(PageInfo { - addr: i, - size: X64PageSize::P2m, - }) - } else if size_addr - i >= X64PageSize::P4k.to_size() { - Some(PageInfo { - addr: i, - size: X64PageSize::P4k, - }) - } else { - None - } - } { - i += page_info.size.to_size(); - page_prelist.push(page_info); - } else { - break; - } - } - - let mut page_list: Vec = vec![]; - - let mut split = [2, 0, 0].to_vec(); - - for _ in 0..2 { - page_prelist.shuffle(&mut rng); - for i in page_prelist { - let mut list = if split[i.size.to_idx()] == 0 - || (split[i.size.to_idx()] != 2 && rng.gen::()) - { - split[i.size.to_idx()] = std::cmp::max(split[i.size.to_idx()], 1); - i.split_down() - } else { - [i].to_vec() - }; - - list.shuffle(&mut rng); - - for o in list { - page_list.push(o); - } - } - - page_prelist = page_list.clone(); - } - - let mut map = MemoryMap::new(); - map.push_range(0.into(), buf.len().into(), (buf.as_ptr() as u64).into()); - - let mem = unsafe { MappedPhysicalMemory::from_addrmap_mut(map) }; - - Self { - buf, - mem, - page_list: page_list.into(), - pt_pages: vec![], - last_pid: 0, - rng, - } - } - - //Given it's the tests, we will have a panic if out of mem - fn alloc_pt_page(&mut self) -> PageInfo { - if let Some(page) = self.pt_pages.pop() { - page - } else { - self.pt_pages = self - .page_list - .pop_front() - .unwrap() - .split_to_size(X64PageSize::P4k); - self.pt_pages.pop().unwrap() - } - } - - fn next_page_for_address(&mut self, _addr: Address) -> PageInfo { - self.alloc_pt_page() - } - - pub fn alloc_process(&mut self, map_size: usize, test_buf: &[u8]) -> DummyProcess { - let (dtb, address) = self.alloc_dtb(map_size, test_buf); - - self.last_pid += 1; - - DummyProcess { - address, - dtb, - pid: self.last_pid, - map_size, - } - } - - pub fn vtop(&mut self, dtb_base: Address, virt_addr: Address) -> Option
{ - let mut pml4 = unsafe { - &mut *(self - .buf - .as_ptr() - .add(dtb_base.as_usize()) - .cast::() as *mut _) - }; - - let pt_mapper = - unsafe { OffsetPageTable::new(&mut pml4, VirtAddr::from_ptr(self.buf.as_ptr())) }; - - match pt_mapper.translate_addr(VirtAddr::new(virt_addr.as_u64())) { - None => None, - Some(addr) => Some(Address::from(addr.as_u64())), - } - } - - pub fn alloc_dtb(&mut self, map_size: usize, test_buf: &[u8]) -> (Address, Address) { - let virt_base = (Address::null() - + self - .rng - .gen_range(0x0001_0000_0000_usize, ((!0_usize) << 20) >> 20)) - .as_page_aligned(size::gb(2)); - - ( - self.alloc_dtb_const_base(virt_base, map_size, test_buf), - virt_base, - ) - } - - pub fn alloc_dtb_const_base( - &mut self, - virt_base: Address, - map_size: usize, - test_buf: &[u8], - ) -> Address { - let mut cur_len = 0; - - let dtb = self.alloc_pt_page(); - - let mut pml4 = unsafe { - &mut *(self - .buf - .as_ptr() - .add(dtb.addr.as_usize()) - .cast::() as *mut _) - }; - *pml4 = PageTable::new(); - - let mut pt_mapper = - unsafe { OffsetPageTable::new(&mut pml4, VirtAddr::from_ptr(self.buf.as_ptr())) }; - - while cur_len < map_size { - let page_info = self.next_page_for_address(cur_len.into()); - let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; - - if test_buf.len() >= (cur_len + page_info.size.to_size()) { - self.mem - .phys_write_raw( - page_info.addr.into(), - &test_buf[cur_len..(cur_len + page_info.size.to_size())], - ) - .unwrap(); - } else if test_buf.len() > cur_len { - self.mem - .phys_write_raw(page_info.addr.into(), &test_buf[cur_len..]) - .unwrap(); - } - - unsafe { - match page_info.size { - X64PageSize::P1g => pt_mapper - .map_to( - paging::page::Page::::from_start_address_unchecked( - VirtAddr::new((virt_base + cur_len).as_u64()), - ), - PhysFrame::from_start_address_unchecked(PhysAddr::new( - page_info.addr.as_u64(), - )), - flags, - self, - ) - .is_ok(), - X64PageSize::P2m => pt_mapper - .map_to( - 
paging::page::Page::::from_start_address_unchecked( - VirtAddr::new((virt_base + cur_len).as_u64()), - ), - PhysFrame::from_start_address_unchecked(PhysAddr::new( - page_info.addr.as_u64(), - )), - flags, - self, - ) - .is_ok(), - X64PageSize::P4k => pt_mapper - .map_to( - paging::page::Page::::from_start_address_unchecked( - VirtAddr::new((virt_base + cur_len).as_u64()), - ), - PhysFrame::from_start_address_unchecked(PhysAddr::new( - page_info.addr.as_u64(), - )), - flags, - self, - ) - .is_ok(), - }; - } - cur_len += page_info.size.to_size(); - } - - dtb.addr - } -} diff --git a/apex_dma/memflow_lib/memflow/src/mem/mem_data.rs b/apex_dma/memflow_lib/memflow/src/mem/mem_data.rs new file mode 100644 index 0000000..b118ed5 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/mem_data.rs @@ -0,0 +1,131 @@ +//! Generic address and buffer association structure. + +use crate::types::{umem, Address, PageType, PhysicalAddress}; +use cglue::callback::{Callbackable, OpaqueCallback}; +use cglue::iter::CIterator; +use cglue::tuple::*; + +use cglue::slice::*; + +/// MemData type for regular memory reads. +pub type ReadDataRaw<'a> = CTup3>; +pub type ReadData<'a> = CTup2>; + +pub trait ReadRawIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> ReadRawIterator<'a> for T {} + +pub trait ReadIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> ReadIterator<'a> for T {} + +/// MemData type for regular memory writes. +pub type WriteDataRaw<'a> = CTup3>; +pub type WriteData<'a> = CTup2>; + +pub type VtopRange = CTup2; + +pub type MemoryRange = CTup3; + +pub trait WriteRawIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> WriteRawIterator<'a> for T {} + +pub trait WriteIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> WriteIterator<'a> for T {} + +/// MemData type for physical memory reads. 
+pub type PhysicalReadData<'a> = CTup3>; + +pub trait PhysicalReadIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> PhysicalReadIterator<'a> for T {} + +/// MemData type for physical memory writes. +pub type PhysicalWriteData<'a> = CTup3>; + +pub trait PhysicalWriteIterator<'a>: Iterator> + 'a {} +impl<'a, T: Iterator> + 'a> PhysicalWriteIterator<'a> for T {} + +pub type ReadFailCallback<'a, 'b> = OpaqueCallback<'a, ReadDataRaw<'b>>; +pub type ReadCallback<'a, 'b> = OpaqueCallback<'a, ReadData<'b>>; + +pub type WriteFailCallback<'a, 'b> = OpaqueCallback<'a, WriteDataRaw<'b>>; +pub type WriteCallback<'a, 'b> = OpaqueCallback<'a, WriteData<'b>>; + +pub type MemoryRangeCallback<'a> = OpaqueCallback<'a, MemoryRange>; + +/// Data needed to perform memory operations. +/// +/// `inp` is an iterator containing +#[repr(C)] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct MemOps<'a: 'c, 'b, 'c, T: 'b, P: 'a> { + pub inp: CIterator<'b, T>, + pub out: Option<&'c mut OpaqueCallback<'a, P>>, + pub out_fail: Option<&'c mut OpaqueCallback<'a, P>>, +} + +impl<'a: 'c, 'b, 'c, T: 'b, P: 'a> MemOps<'a, 'b, 'c, T, P> { + #[inline(always)] + pub fn with_raw_mut) -> O>( + iter: impl Into>, + out: Option<&'c mut OpaqueCallback<'a, P>>, + out_fail: Option<&'c mut OpaqueCallback<'a, P>>, + func: F, + ) -> O { + func(Self { + inp: iter.into(), + out, + out_fail, + }) + } + + #[inline(always)] + pub fn with_raw) -> O>( + mut iter: impl Iterator, + out: Option<&mut OpaqueCallback<'a, P>>, + out_fail: Option<&mut OpaqueCallback<'a, P>>, + func: F, + ) -> O { + func(MemOps { + inp: (&mut iter).into(), + out, + out_fail, + }) + } +} + +impl<'a: 'c, 'b, 'c, A: 'b + Into
+ Copy, T: 'b, P: 'a> + MemOps<'a, 'b, 'c, CTup3, P> +{ + #[inline(always)] + pub fn with, P>) -> O>( + iter: impl Iterator + 'a, + out: Option<&'c mut OpaqueCallback<'a, P>>, + out_fail: Option<&'c mut OpaqueCallback<'a, P>>, + func: F, + ) -> O { + let iter = iter.map(|(a, b)| CTup3(a, a.into(), b)); + Self::with_raw(iter, out, out_fail, func) + } +} + +impl<'a: 'c, 'b, 'c, T: 'b, I: Into>, P: 'a> From for MemOps<'a, 'b, 'c, T, P> { + fn from(inp: I) -> Self { + Self { + inp: inp.into(), + out: None, + out_fail: None, + } + } +} + +pub fn opt_call(cb: Option<&mut impl Callbackable>, data: T) -> bool { + cb.map(|cb| cb.call(data)).unwrap_or(true) +} + +pub type ReadRawMemOps<'buf, 'a, 'b, 'c> = MemOps<'a, 'b, 'c, ReadDataRaw<'buf>, ReadData<'buf>>; +pub type WriteRawMemOps<'buf, 'a, 'b, 'c> = MemOps<'a, 'b, 'c, WriteDataRaw<'buf>, WriteData<'buf>>; +pub type ReadMemOps<'buf, 'a, 'b, 'c> = MemOps<'a, 'b, 'c, ReadData<'buf>, ReadData<'buf>>; +pub type WriteMemOps<'buf, 'a, 'b, 'c> = MemOps<'a, 'b, 'c, WriteData<'buf>, WriteData<'buf>>; +pub type PhysicalReadMemOps<'buf, 'a, 'b, 'c> = + MemOps<'a, 'b, 'c, PhysicalReadData<'buf>, ReadData<'buf>>; +pub type PhysicalWriteMemOps<'buf, 'a, 'b, 'c> = + MemOps<'a, 'b, 'c, PhysicalWriteData<'buf>, WriteData<'buf>>; diff --git a/apex_dma/memflow_lib/memflow/src/mem/mem_map.rs b/apex_dma/memflow_lib/memflow/src/mem/mem_map.rs index 1b3f555..1a1b8f3 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/mem_map.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/mem_map.rs @@ -1,12 +1,19 @@ -use crate::error::{Error, Result}; -use crate::iter::{SplitAtIndex, SplitAtIndexNoMutation}; -use crate::types::{Address, PhysicalAddress}; +use crate::iter::SplitAtIndex; +use crate::types::{umem, Address, PhysicalAddress}; +use crate::mem::mem_data::opt_call; +use cglue::callback::*; +use cglue::tuple::*; use std::cmp::Ordering; +use std::convert::TryInto; use std::default::Default; use std::fmt; use std::prelude::v1::*; +// those only required when 
compiling under std environment +#[cfg(feature = "std")] +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; + /// The `MemoryMap`struct provides a mechanism to map addresses from the linear address space /// that memflow uses internally to hardware specific memory regions. /// @@ -15,8 +22,7 @@ use std::prelude::v1::*; /// # Examples /// /// ``` -/// use memflow::mem::MemoryMap; -/// use memflow::iter::FnExtend; +/// use memflow::prelude::{MemoryMap, CTup2, umem}; /// /// let mut map = MemoryMap::new(); /// map.push_remap(0x1000.into(), 0x1000, 0.into()); // push region from 0x1000 - 0x1FFF @@ -24,10 +30,13 @@ use std::prelude::v1::*; /// /// println!("{:?}", map); /// -/// // handle unmapped memory regions by using FnExtend::new, or just ignore them -/// let mut failed_void = FnExtend::void(); +/// // handle unmapped memory regions +/// let failed = &mut |CTup2(a, b)| { +/// println!("Unmapped: {} {}", a, b); +/// true +/// }; /// -/// let hw_addr = map.map(0x10ff.into(), 8, &mut failed_void); +/// let hw_addr = map.map(0x10ff.into(), 8 as umem, Some(failed)); /// ``` #[derive(Clone)] pub struct MemoryMap { @@ -56,7 +65,7 @@ impl MemoryMapping { } } -impl Default for MemoryMap { +impl Default for MemoryMap { fn default() -> Self { Self { mappings: Vec::new(), @@ -67,7 +76,7 @@ impl Default for MemoryMap { type InnerIter = std::vec::IntoIter>; type InnerFunc = fn(MemoryMapping) -> T; -impl IntoIterator for MemoryMap { +impl IntoIterator for MemoryMap { type Item = (Address, M); type IntoIter = std::iter::Map, InnerFunc>; @@ -78,7 +87,7 @@ impl IntoIterator for MemoryMap { } } -impl MemoryMap { +impl MemoryMap { /// Constructs a new memory map. /// /// This function is identical to `MemoryMap::default()`. @@ -86,6 +95,11 @@ impl MemoryMap { MemoryMap::default() } + // Returns `true` if there are no memory mappings. 
+ pub fn is_empty(&self) -> bool { + self.mappings.is_empty() + } + /// Iterator over memory mappings pub fn iter(&self) -> impl Iterator> { self.mappings.iter() @@ -97,13 +111,36 @@ impl MemoryMap { /// (for buf-to-buf copies). /// /// Invalid regions get pushed to the `out_fail` parameter. This function requries `self` - pub fn map<'a, T: 'a + SplitAtIndex, V: Extend<(Address, T)>>( + pub fn map<'a, T: 'a + SplitAtIndex, V: Callbackable>>( &'a self, addr: Address, buf: T, - out_fail: &'a mut V, - ) -> impl Iterator + 'a { - MemoryMapIterator::new(&self.mappings, Some((addr, buf)).into_iter(), out_fail) + out_fail: Option<&'a mut V>, + ) -> impl Iterator> + 'a { + MemoryMapIterator::new( + &self.mappings, + Some(CTup3(addr, addr, buf)).into_iter(), + out_fail, + ) + } + + /// Maps a address range iterator to an address range. + /// + /// Output element lengths will both match, so there is no need to do additonal clipping + /// (for buf-to-buf copies). + /// + /// Invalid regions get pushed to the `out_fail` parameter + pub fn map_base_iter< + 'a, + T: 'a + SplitAtIndex, + I: 'a + Iterator>, + V: Callbackable>, + >( + &'a self, + iter: I, + out_fail: Option<&'a mut V>, + ) -> MemoryMapIterator<'a, I, M, T, V> { + MemoryMapIterator::new(&self.mappings, iter, out_fail) } /// Maps a address range iterator to a hardware address range. 
@@ -115,16 +152,16 @@ impl MemoryMap { pub fn map_iter< 'a, T: 'a + SplitAtIndex, - I: 'a + Iterator, - V: Extend<(Address, T)>, + I: 'a + Iterator>, + V: Callbackable>, >( &'a self, iter: I, - out_fail: &'a mut V, - ) -> impl Iterator + 'a { + out_fail: Option<&'a mut V>, + ) -> MemoryMapIterator<'a, impl Iterator> + 'a, M, T, V> { MemoryMapIterator::new( &self.mappings, - iter.map(|(addr, buf)| (addr.address(), buf)), + iter.map(|CTup3(addr, meta_addr, buf)| CTup3(addr.address(), meta_addr, buf)), out_fail, ) } @@ -171,6 +208,7 @@ impl MemoryMap { } #[cfg(feature = "serde")] +#[allow(unused)] #[derive(::serde::Deserialize)] struct MemoryMapFile { #[serde(rename = "range")] @@ -178,6 +216,7 @@ struct MemoryMapFile { } #[cfg(feature = "serde")] +#[allow(unused)] #[derive(::serde::Deserialize)] struct MemoryMapFileRange { base: u64, @@ -185,7 +224,19 @@ struct MemoryMapFileRange { real_base: Option, } -impl MemoryMap<(Address, usize)> { +// FFI Safe MemoryMapping type for `MemoryMap<(Address, umem)>`. +// TODO: this could be removed if the RefCell requirement above would be removed. +#[repr(C)] +#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct PhysicalMemoryMapping { + pub base: Address, + pub size: umem, + pub real_base: Address, +} + +impl MemoryMap<(Address, umem)> { /// Constructs a new memory map by parsing the mapping table from a [TOML](https://toml.io/) file. /// /// The file must contain a mapping table in the following format: @@ -204,14 +255,20 @@ impl MemoryMap<(Address, usize)> { /// The `real_base` parameter is optional. If it is not set there will be no re-mapping. 
#[cfg(feature = "memmapfiles")] pub fn open>(path: P) -> Result { - let contents = ::std::fs::read_to_string(path) - .map_err(|_| Error::Other("unable to open the memory mapping file"))?; - let mappings: MemoryMapFile = ::toml::from_str(&contents) - .map_err(|_| Error::Other("unable to parse the memory mapping toml file"))?; + let contents = ::std::fs::read_to_string(path).map_err(|err| { + Error(ErrorOrigin::MemoryMap, ErrorKind::UnableToReadFile) + .log_error(format!("unable to open the memory mapping file: {}", err)) + })?; + let mappings: MemoryMapFile = ::toml::from_str(&contents).map_err(|err| { + Error(ErrorOrigin::MemoryMap, ErrorKind::UnableToReadFile).log_error(format!( + "unable to parse the memory mapping toml file: {}", + err + )) + })?; let mut result = MemoryMap::new(); for range in mappings.ranges.iter() { - let real_base = range.real_base.unwrap_or_else(|| range.base); + let real_base = range.real_base.unwrap_or(range.base); result.push_range( range.base.into(), (range.base + range.length).into(), @@ -222,10 +279,25 @@ impl MemoryMap<(Address, usize)> { Ok(result) } + /// Returns the highest memory address that can be read. + pub fn max_address(&self) -> Address { + self.mappings + .iter() + .map(|m| m.base() + m.output.borrow().1) + .max() + .unwrap_or_else(|| umem::MAX.into()) + - 1_usize + } + + // Returns the real size the current memory mappings cover + pub fn real_size(&self) -> umem { + self.mappings.iter().fold(0, |s, m| s + m.output.borrow().1) + } + /// Adds a new memory mapping to this memory map by specifying base address and size of the mapping. /// /// When adding overlapping memory regions this function will panic! 
- pub fn push_remap(&mut self, base: Address, size: usize, real_base: Address) -> &mut Self { + pub fn push_remap(&mut self, base: Address, size: umem, real_base: Address) -> &mut Self { self.push(base, (real_base, size)) } @@ -236,7 +308,7 @@ impl MemoryMap<(Address, usize)> { /// If end < base, the function will do nothing pub fn push_range(&mut self, base: Address, end: Address, real_base: Address) -> &mut Self { if end > base { - self.push_remap(base, end - base, real_base) + self.push_remap(base, (end - base) as umem, real_base) } else { self } @@ -260,7 +332,10 @@ impl MemoryMap<(Address, usize)> { .map(|(base, (real_base, size))| { ( base, - std::slice::from_raw_parts_mut(real_base.as_u64() as _, size), + std::slice::from_raw_parts_mut( + real_base.to_umem() as _, + size.try_into().unwrap(), + ), ) }) .for_each(|(base, buf)| { @@ -284,7 +359,7 @@ impl MemoryMap<(Address, usize)> { .map(|(base, (real_base, size))| { ( base, - std::slice::from_raw_parts(real_base.as_u64() as _, size), + std::slice::from_raw_parts(real_base.to_umem() as _, size.try_into().unwrap()), ) }) .for_each(|(base, buf)| { @@ -293,27 +368,49 @@ impl MemoryMap<(Address, usize)> { ret_map } + + // TODO: into/from trait impls + pub fn into_vec(self) -> Vec { + self.iter() + .map(|m| PhysicalMemoryMapping { + base: m.base(), + size: m.output().1, + real_base: m.output().0, + }) + .collect::>() + } + + pub fn from_vec(mem_map: Vec) -> Self { + let mut map = Self::new(); + for mapping in mem_map.iter() { + map.push_range(mapping.base, mapping.base + mapping.size, mapping.real_base); + } + map + } } const MIN_BSEARCH_THRESH: usize = 32; -pub struct MemoryMapIterator<'a, I, M, T, F> { +pub type MapFailCallback<'a, T> = OpaqueCallback<'a, CTup3>; + +pub struct MemoryMapIterator<'a, I, M, T, C> { map: &'a [MemoryMapping], in_iter: I, - fail_out: &'a mut F, - cur_elem: Option<(Address, T)>, + fail_out: Option<&'a mut C>, + cur_elem: Option>, cur_map_pos: usize, } 
+#[allow(clippy::needless_option_as_deref)] impl< 'a, - I: Iterator, - M: SplitAtIndexNoMutation, + I: Iterator>, + M: SplitAtIndex, T: SplitAtIndex, - F: Extend<(Address, T)>, - > MemoryMapIterator<'a, I, M, T, F> + C: Callbackable>, + > MemoryMapIterator<'a, I, M, T, C> { - fn new(map: &'a [MemoryMapping], in_iter: I, fail_out: &'a mut F) -> Self { + fn new(map: &'a [MemoryMapping], in_iter: I, fail_out: Option<&'a mut C>) -> Self { Self { map, in_iter, @@ -323,8 +420,12 @@ impl< } } - fn get_next(&mut self) -> Option<(M, T)> { - if let Some((mut addr, mut buf)) = self.cur_elem.take() { + pub fn fail_out(&mut self) -> Option<&mut C> { + self.fail_out.as_deref_mut() + } + + fn get_next(&mut self) -> Option> { + if let Some(CTup3(mut addr, mut meta_addr, buf)) = self.cur_elem.take() { if self.map.len() >= MIN_BSEARCH_THRESH && self.cur_map_pos == 0 { self.cur_map_pos = match self.map.binary_search_by(|map_elem| { if map_elem.base > addr { @@ -342,19 +443,21 @@ impl< for (i, map_elem) in self.map.iter().enumerate().skip(self.cur_map_pos) { let output = &mut *map_elem.output.borrow_mut(); if map_elem.base + output.length() > addr { - let offset = map_elem.base.as_usize().saturating_sub(addr.as_usize()); + let offset: umem = map_elem.base.to_umem().saturating_sub(addr.to_umem()); let (left_reject, right) = buf.split_at(offset); - if left_reject.length() > 0 { - self.fail_out.extend(Some((addr, left_reject))); + if let Some(left_reject) = left_reject { + opt_call(self.fail_out.as_deref_mut(), CTup2(meta_addr, left_reject)); } addr += offset; + meta_addr += offset; - if let Some(mut leftover) = right { - let off = map_elem.base + output.length() - addr; + if let Some(leftover) = right { + let off = map_elem.base.to_umem() + output.length() - addr.to_umem(); let (ret, keep) = leftover.split_at(off); + let ret_length = ret.as_ref().map(|r| r.length()).unwrap_or_default(); let cur_map_pos = &mut self.cur_map_pos; let in_iter = &mut self.in_iter; @@ -364,23 +467,29 @@ impl< 
//If memory is in right order, this will skip the current mapping, //but not reset the search *cur_map_pos = i + 1; - (addr + ret.length(), x) + CTup3(addr + ret_length, meta_addr + ret_length, x) }) .or_else(|| { *cur_map_pos = 0; in_iter.next() }); - let off = addr - map_elem.base; - return Some(( - output.split_at(off).1.unwrap().split_at(ret.length()).0, - ret, - )); + let off = addr.to_umem() - map_elem.base.to_umem(); + let split_left = unsafe { output.split_at_mut(off).1 }; + return split_left + .unwrap() + .split_at(ret_length) + .0 + .zip(ret) + .map(|(a, b)| (a, meta_addr, b)) + .map(<_>::into); } - break; + return None; } } + + let _ = opt_call(self.fail_out.as_deref_mut(), CTup2(meta_addr, buf)); } None } @@ -388,13 +497,13 @@ impl< impl< 'a, - I: Iterator, - M: SplitAtIndexNoMutation, + I: Iterator>, + M: SplitAtIndex, T: SplitAtIndex, - F: Extend<(Address, T)>, - > Iterator for MemoryMapIterator<'a, I, M, T, F> + C: Callbackable>, + > Iterator for MemoryMapIterator<'a, I, M, T, C> { - type Item = (M, T); + type Item = CTup3; fn next(&mut self) -> Option { //Could optimize this and move over to new method, but would need to fuse the iter @@ -435,7 +544,7 @@ where } } -impl fmt::Debug for MemoryMapping<(Address, usize)> { +impl fmt::Debug for MemoryMapping<(Address, umem)> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, @@ -462,7 +571,6 @@ impl fmt::Debug for MemoryMapping<&mut [u8]> { #[cfg(test)] mod tests { use super::*; - use crate::iter::FnExtend; #[test] fn test_mapping() { @@ -470,13 +578,21 @@ mod tests { map.push_remap(0x1000.into(), 0x1000, 0.into()); map.push_remap(0x3000.into(), 0x1000, 0x2000.into()); - let mut void_panic = FnExtend::new(|x| panic!("Should not have mapped {:?}", x)); + let mut void_panic = |x| panic!("Should not have mapped {:?}", x); assert_eq!( - (map.map(0x10ff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x10ff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, 
Address::from(0x00ff) ); assert_eq!( - (map.map(0x30ff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x30ff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x20ff) ); } @@ -487,19 +603,33 @@ mod tests { map.push_remap(0x1000.into(), 0x1000, 0.into()); map.push_remap(0x3000.into(), 0x1000, 0x2000.into()); - let mut void_panic = FnExtend::new(|x| panic!("Should not have mapped {:?}", x)); - let mut void = FnExtend::void(); + let mut void_panic = |x| panic!("Should not have mapped {:?}", x); + let mut void = |_| true; assert_eq!( - (map.map(0x3000.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x3000.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2000) ); assert_eq!( - (map.map(0x3fff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x3fff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2fff) ); - assert_eq!(map.map(0x2fff.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x4000.into(), 1, &mut void).next(), None); + assert_eq!( + map.map::(0x2fff.into(), 1, Some(&mut void)).next(), + None + ); + assert_eq!( + map.map::(0x4000.into(), 1, Some(&mut void)).next(), + None + ); } #[test] @@ -508,11 +638,30 @@ mod tests { map.push_remap(0x1000.into(), 0x1000, 0.into()); map.push_remap(0x3000.into(), 0x1000, 0x2000.into()); - let mut void = FnExtend::void(); - assert_eq!(map.map(0x00ff.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x20ff.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x4000.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x40ff.into(), 1, &mut void).next(), None); + let mut void = vec![]; + let mut cbvoid: OpaqueCallback<_> = (&mut void).into(); + assert_eq!( + map.map::(0x00ff.into(), 1, Some(&mut cbvoid)) + .next(), + None + ); + assert_eq!( + map.map::(0x20ff.into(), 1, Some(&mut cbvoid)) + .next(), + None + ); + assert_eq!( + map.map::(0x4000.into(), 1, 
Some(&mut cbvoid)) + .next(), + None + ); + assert_eq!( + map.map::(0x40ff.into(), 1, Some(&mut cbvoid)) + .next(), + None + ); + + assert_eq!(void.len(), 4); } #[test] @@ -521,13 +670,21 @@ mod tests { map.push_range(0x1000.into(), 0x2000.into(), 0.into()); map.push_range(0x3000.into(), 0x4000.into(), 0x2000.into()); - let mut void_panic = FnExtend::new(|x| panic!("Should not have mapped {:?}", x)); + let mut void_panic = |x| panic!("Should not have mapped {:?}", x); assert_eq!( - (map.map(0x10ff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x10ff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x00ff) ); assert_eq!( - (map.map(0x30ff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x30ff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x20ff) ); } @@ -538,19 +695,33 @@ mod tests { map.push_range(0x1000.into(), 0x2000.into(), 0.into()); map.push_range(0x3000.into(), 0x4000.into(), 0x2000.into()); - let mut void_panic = FnExtend::new(|x| panic!("Should not have mapped {:?}", x)); - let mut void = FnExtend::void(); + let mut void_panic = |x| panic!("Should not have mapped {:?}", x); + let mut void = |_| true; assert_eq!( - (map.map(0x3000.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x3000.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2000) ); assert_eq!( - (map.map(0x3fff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x3fff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2fff) ); - assert_eq!(map.map(0x2fff.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x4000.into(), 1, &mut void).next(), None); + assert_eq!( + map.map::(0x2fff.into(), 1, Some(&mut void)).next(), + None + ); + assert_eq!( + map.map::(0x4000.into(), 1, Some(&mut void)).next(), + None + ); } #[test] @@ -559,19 +730,33 @@ mod tests { map.push_range(0x1000.into(), 0x2000.into(), 
0.into()); map.push_range(0x2000.into(), 0x3000.into(), 0x2000.into()); - let mut void_panic = FnExtend::new(|x| panic!("Should not have mapped {:?}", x)); - let mut void = FnExtend::void(); + let mut void_panic = |x| panic!("Should not have mapped {:?}", x); + let mut void = |_| true; assert_eq!( - (map.map(0x2000.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x2000.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2000) ); assert_eq!( - (map.map(0x2fff.into(), 1, &mut void_panic).next().unwrap().0).0, + (map.map::(0x2fff.into(), 1, Some(&mut void_panic)) + .next() + .unwrap() + .0) + .0, Address::from(0x2fff) ); - assert_eq!(map.map(0x3fff.into(), 1, &mut void).next(), None); - assert_eq!(map.map(0x3000.into(), 1, &mut void).next(), None); + assert_eq!( + map.map::(0x3fff.into(), 1, Some(&mut void)).next(), + None + ); + assert_eq!( + map.map::(0x3000.into(), 1, Some(&mut void)).next(), + None + ); } #[test] @@ -604,6 +789,23 @@ mod tests { map.push_range(0x2000.into(), 0x20ff.into(), 0.into()); } + #[test] + fn test_max_address() { + let mut map = MemoryMap::new(); + map.push_remap(0x1000.into(), 0x1000, 0.into()); + map.push_remap(0x3000.into(), 0x1000, 0x2000.into()); + assert_eq!(map.max_address(), Address::from(0x3FFF)); + } + + #[test] + fn test_real_size() { + let mut map = MemoryMap::new(); + map.push_remap(0x1000.into(), 0x1000, 0.into()); + map.push_remap(0x3000.into(), 0x1000, 0x2000.into()); + map.push_remap(0x6000.into(), 0x2000, 0x3000.into()); + assert_eq!(map.real_size(), 0x4000); + } + #[cfg(feature = "memmapfiles")] #[test] fn test_load_toml() { diff --git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/arch_overlay.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/arch_overlay.rs new file mode 100644 index 0000000..7609dcf --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/arch_overlay.rs @@ -0,0 +1,53 @@ +//! 
Overlays a custom architecture on the memory view + +use super::*; +use crate::architecture::{ArchitectureObj, Endianess}; +use crate::error::*; + +/// Allows to overwrite the architecture of the memory view. +/// +/// Is useful when a 32 bit process runs in a 64 bit architecture, and a 64-bit Pointer is wanted +/// to be read with `read_ptr`. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct ArchOverlayView { + mem: T, + arch_bits: u8, + little_endian: bool, +} + +impl ArchOverlayView { + pub fn new_parts(mem: T, arch_bits: u8, little_endian: bool) -> Self { + Self { + mem, + arch_bits, + little_endian, + } + } + + pub fn new(mem: T, arch: ArchitectureObj) -> Self { + Self::new_parts( + mem, + arch.bits(), + arch.endianess() == Endianess::LittleEndian, + ) + } +} + +impl MemoryView for ArchOverlayView { + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()> { + self.mem.read_raw_iter(data) + } + + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()> { + self.mem.write_raw_iter(data) + } + + fn metadata(&self) -> MemoryViewMetadata { + MemoryViewMetadata { + little_endian: self.little_endian, + arch_bits: self.arch_bits, + ..self.mem.metadata() + } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/batcher.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/batcher.rs new file mode 100644 index 0000000..9e7ac6c --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/batcher.rs @@ -0,0 +1,390 @@ +use std::prelude::v1::*; + +use super::*; +use crate::dataview::PodMethods; +use crate::error::PartialResult; +use crate::types::Address; + +/// A structure for batching memory reads and writes. 
+/// +/// # Examples +/// +/// ``` +/// use memflow::prelude::v1::*; +/// use memflow::dummy::DummyMemory; +/// # use memflow::dummy::DummyOs; +/// # use memflow::architecture::x86::x64; +/// +/// # let phys_mem = DummyMemory::new(size::mb(16)); +/// # let mut os = DummyOs::new(phys_mem); +/// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); +/// # let phys_mem = os.into_inner(); +/// # let translator = x64::new_translator(dtb); +/// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); +/// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); +/// ``` +pub struct MemoryViewBatcher<'a, T: MemoryView> { + vmem: &'a mut T, + read_list: Vec>, + write_list: Vec>, +} + +impl<'a, T: MemoryView> MemoryViewBatcher<'a, T> { + /// Creates a new `MemoryViewBatcher` instance. + /// + /// # Examples + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// ``` + pub fn new(vmem: &'a mut T) -> Self { + Self { + vmem, + read_list: vec![], + write_list: vec![], + } + } + + /// Reserves capacity for the read list. + /// Reserves capacity for at least `additional` more elements to be handled + /// in the given `MemoryViewBatcher<'a, T>`. The internal collection may reserve + /// more space to speculatively avoid frequent reallocations. + /// + /// # Arguments + /// + /// * `capacity`: The number of operations to reserve space for. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // Reserve space 10 operations + /// batcher.reserve(10); + /// ``` + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + pub fn reserve(&mut self, capacity: usize) -> &mut Self { + self.read_list.reserve(capacity); + self + } + + /// Executes all pending operations in this batch. + /// + /// This also consumes and discards this batcher so it cannot be used anymore. + /// The same behavior can be achieved by implicitly calling `drop` on the batcher + /// (for example, when going out of scope). 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// batcher.commit_rw().unwrap(); + /// ``` + pub fn commit_rw(&mut self) -> PartialResult<()> { + if !self.read_list.is_empty() { + self.vmem.read_raw_list(&mut self.read_list)?; + self.read_list.clear(); + } + + if !self.write_list.is_empty() { + self.vmem.write_raw_list(&self.write_list)?; + self.write_list.clear(); + } + + Ok(()) + } + + /// Appends an iterator over read operations `ReadIter` to this batch. + /// + /// # Arguments + /// + /// * `iter`: An iterator over `ReadData` instances. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// let mut buf = [0u8; 8]; + /// + /// // create the batcher + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // append the read command + /// batcher.read_raw_iter(std::iter::once(CTup2(addr, buf.as_mut().into())).into_iter()); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// assert!(batcher.commit_rw().is_ok()); + /// ``` + pub fn read_raw_iter(&mut self, iter: impl ReadIterator<'a>) -> &mut Self { + self.read_list.extend(iter); + self + } + + /// Reads data from memory and stores it in the provided buffer. + /// + /// # Arguments + /// + /// * `addr`: The starting address to read from. + /// * `out`: A mutable reference to the buffer where the data will be stored. 
+ /// + /// # Example + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// let write_data = [0x10, 0x20, 0x30, 0x40]; + /// let mut read_data = [0u8; 4]; + /// + /// { + /// // create batcher in a new scope + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // write the `write_data` array to memory + /// batcher.write_raw_into(addr, &write_data); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// assert!(batcher.commit_rw().is_ok()); + /// } + /// + /// // check if the batched write was successful + /// virt_mem.read_raw_into(addr, &mut read_data).unwrap(); + /// assert_eq!(read_data, write_data); + /// ``` + pub fn write_raw_iter(&mut self, iter: impl WriteIterator<'a>) -> &mut Self { + self.write_list.extend(iter); + self + } + + /// Reads data from memory and stores it in the provided buffer. + /// + /// # Arguments + /// + /// * `addr` - The address to start reading from. + /// * `out` - The buffer to store the read data in. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// let mut buffer = [0u8; 4]; + /// + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // read 4 bytes from some address and store the result in `buffer` + /// batcher.read_raw_into(addr, &mut buffer); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// batcher.commit_rw().unwrap(); + /// ``` + pub fn read_raw_into<'b: 'a>(&mut self, addr: Address, out: &'b mut [u8]) -> &mut Self { + self.read_raw_iter(std::iter::once(CTup2(addr, out.into()))) + } + + /// Reads data from memory and stores it in the provided buffer. + /// + /// # Arguments + /// + /// * `addr` - The address to read from. + /// * `out` - The buffer to store the read data. 
+ /// + /// # Example + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// // writes the text 'hello world' to the specified address in memory + /// virt_mem.write(addr, b"hello world").unwrap(); + /// + /// let mut buffer = [0u8; 11]; + /// + /// { + /// // creates a batcher and reads 11 bytes from memory + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// batcher.read_into(addr, &mut buffer); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// batcher.commit_rw().unwrap(); + /// } + /// + /// // compare the memory + /// assert_eq!(&buffer, b"hello world"); + /// ``` + pub fn read_into<'b: 'a, F: Pod + ?Sized>( + &mut self, + addr: Address, + out: &'b mut F, + ) -> &mut Self { + self.read_raw_into(addr, out.as_bytes_mut()) + } + + /// Writes data to memory from the provided buffer. 
+ /// + /// # Example + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// let write_data = [0x10, 0x20, 0x30, 0x40]; + /// let mut read_data = [0u8; 4]; + /// + /// { + /// // create batcher in a new scope + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // writes the block to memory at the specified address + /// batcher.write_raw_into(addr, &write_data); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// assert!(batcher.commit_rw().is_ok()); + /// } + /// + /// // check if the write succeeded + /// virt_mem.read_raw_into(addr, &mut read_data).unwrap(); + /// assert_eq!(read_data, write_data); + /// ``` + pub fn write_raw_into<'b: 'a>(&mut self, addr: Address, out: &'b [u8]) -> &mut Self { + self.write_raw_iter(std::iter::once(CTup2(addr, out.into()))) + } + + /// Serializes data and writes it to memory. 
+ /// + /// # Example + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let addr = virt_base; // some arbitrary address + /// let write_data = 0xdeadbeefu64; + /// let mut read_data = 0u64; + /// + /// { + /// // create batcher in a new scope + /// let mut batcher = MemoryViewBatcher::new(&mut virt_mem); + /// + /// // writes the block to memory at the specified address + /// batcher.write_into(addr, &write_data); + /// + /// // commit the batch to memory, this is optional and just used to check if the operations succeed + /// assert!(batcher.commit_rw().is_ok()); + /// } + /// + /// // check if the write succeeded + /// virt_mem.read_into(addr, &mut read_data).unwrap(); + /// assert_eq!(read_data, write_data); + /// ``` + pub fn write_into<'b: 'a, F: Pod + ?Sized>(&mut self, addr: Address, out: &'b F) -> &mut Self { + self.write_raw_into(addr, out.as_bytes()) + } +} + +impl<'a, T: MemoryView> Drop for MemoryViewBatcher<'a, T> { + fn drop(&mut self) { + let _ = self.commit_rw(); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/cached_view.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/cached_view.rs new file mode 100644 index 0000000..19db9ff --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/cached_view.rs @@ -0,0 +1,373 @@ +//! This cache is a wrapper for connector objects that implement the [`PhysicalMemory`] trait. +//! It enables a configurable caching layer when accessing physical pages. +//! +//! 
Each page that is being read by the the connector will be placed into a `PageCache` object. +//! If the cache is still valid then for consecutive reads this connector will just return the values from the cache +//! and not issue out a new read. In case the cache is not valid anymore it will do a new read. +//! +//! The cache time is determined by the customizable cache validator. +//! The cache validator has to implement the [`CacheValidator`](../trait.CacheValidator.html) trait. +//! +//! To make it easier and quicker to construct and work with caches this module also contains a cache builder. +//! +//! More examples can be found in the documentations for each of the structs in this module. +//! +//! # Examples +//! +//! Building a simple cache with default settings: +//! ``` +//! # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; +//! use memflow::prelude::v1::*; +//! use memflow::dummy::DummyMemory; +//! # use memflow::dummy::DummyOs; +//! # use memflow::architecture::x86::x64; +//! +//! # let phys_mem = DummyMemory::new(size::mb(16)); +//! # let mut os = DummyOs::new(phys_mem); +//! # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); +//! # let phys_mem = os.into_inner(); +//! # let translator = x64::new_translator(dtb); +//! let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); +//! +//! let mut cached_mem = CachedView::builder(virt_mem) +//! .arch(x64::ARCH) +//! .validator(DefaultCacheValidator::default()) +//! .cache_size(size::mb(1)) +//! .build() +//! .unwrap(); +//! +//! let addr = virt_base; // some arbitrary address +//! +//! cached_mem.write(addr, &MAGIC_VALUE).unwrap(); +//! +//! let value: u64 = cached_mem.read(addr).unwrap(); +//! assert_eq!(value, MAGIC_VALUE); +//! ``` + +use super::*; +use crate::mem::phys_mem::{page_cache::PageCache, PhysicalMemoryView}; + +/// Cached memory view. +/// +/// This structure allows to build a page cache on top of a memory view. 
+/// +/// Internally this structure uses the [`CachedPhysicalMemory`] cache. +/// It does this by remapping from / to [`PhysicalMemory`]. +#[derive(Clone)] +pub struct CachedView<'a, T, Q> +where + T: MemoryView, + Q: CacheValidator, +{ + mem: PhysicalMemoryView, Q>>, +} + +impl<'a, T, Q> MemoryView for CachedView<'a, T, Q> +where + T: MemoryView, + Q: CacheValidator, +{ + #[inline] + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()> { + self.mem.read_raw_iter(data) + } + + #[inline] + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()> { + self.mem.write_raw_iter(data) + } + + #[inline] + fn metadata(&self) -> MemoryViewMetadata { + self.mem.metadata() + } +} + +impl<'a, T: MemoryView> CachedView<'a, T, DefaultCacheValidator> { + /// Returns a new builder for this cache with default settings. + #[inline] + pub fn builder(mem: T) -> CachedViewBuilder { + CachedViewBuilder::new(mem) + } +} + +pub struct CachedViewBuilder { + mem: T, + validator: Q, + page_size: Option, + cache_size: usize, +} + +impl CachedViewBuilder { + /// Creates a new [`CachedView`] builder. + /// The memory object is mandatory as the [`CachedView`] struct wraps around it. + /// + /// This type of cache also is required to know the exact page size of the target system. + /// This can either be set directly via the `page_size()` method or via the `arch()` method. + /// If no page size has been set this builder will fail to build the [`CachedView`]. + /// + /// Without further adjustments this function creates a cache that is 2 megabytes in size and caches + /// pages that contain pagetable entries as well as read-only pages. + /// + /// It is also possible to either let the [`CachedView`] object own or just borrow the underlying memory object. 
+ /// + /// # Examples + /// Moves ownership of a mem object and retrieves it back: + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let mut cached_mem = CachedView::builder(virt_mem) + /// .arch(x64::ARCH) + /// .build() + /// .unwrap(); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// cached_mem.write(addr, &MAGIC_VALUE).unwrap(); + /// + /// let value: u64 = cached_mem.read(addr).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// ``` + /// + /// Borrowing a mem object: + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// fn build(mem: Fwd<&mut T>) + /// -> impl MemoryView + '_ { + /// CachedView::builder(mem) + /// .arch(x64::ARCH) + /// .build() + /// .unwrap() + /// } + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut cached_view = build(virt_mem.forward_mut()); + /// + /// let read = cached_view.read::(0.into()).unwrap(); + /// ``` + pub fn new(mem: T) -> Self { + Self { + mem, + validator: DefaultCacheValidator::default(), + page_size: None, + cache_size: size::mb(2), + } + } +} + +impl CachedViewBuilder { + 
/// Builds the [`CachedView`] object or returns an error if the page size is not set. + pub fn build<'a>(self) -> Result> { + let phys_mem = self.mem.into_phys_mem(); + + let cache = CachedPhysicalMemory::new( + phys_mem, + PageCache::with_page_size( + self.page_size.ok_or_else(|| { + Error(ErrorOrigin::Cache, ErrorKind::Uninitialized) + .log_error("page_size must be initialized") + })?, + self.cache_size, + // we do not know pagetypes on virtual memory so we have to apply this cache to all types + PageType::all(), + self.validator, + ), + ); + + Ok(CachedView { + mem: cache.into_mem_view(), + }) + } + + /// Sets a custom validator for the cache. + /// + /// If this function is not called it will default to a [`DefaultCacheValidator`]. + /// The default validator for std builds is the [`TimedCacheValidator`]. + /// The default validator for no_std builds is the [`CountCacheValidator`]. + /// + /// The default setting is `DefaultCacheValidator::default()`. + /// + /// # Examples: + /// + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// use std::time::Duration; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let mut cached_mem = CachedView::builder(virt_mem) + /// .arch(x64::ARCH) + /// .validator(DefaultCacheValidator::new(Duration::from_millis(2000).into())) + /// .build() + /// .unwrap(); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// cached_mem.write(addr, &MAGIC_VALUE).unwrap(); + /// + /// let value: u64 = cached_mem.read(addr).unwrap(); + /// assert_eq!(value, 
MAGIC_VALUE); + /// ``` + pub fn validator(self, validator: QN) -> CachedViewBuilder { + CachedViewBuilder { + mem: self.mem, + validator, + page_size: self.page_size, + cache_size: self.cache_size, + } + } + + /// Changes the page size of the cache. + /// + /// The cache has to know the exact page size of the target system internally to give reasonable performance. + /// The page size can be either set directly via this function or it can be fetched from the `Architecture` + /// via the `arch()` method of the builder. + /// + /// If the page size is not set the builder will fail. + /// + /// # Examples + /// + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let mut cached_mem = CachedView::builder(virt_mem) + /// .page_size(size::kb(4)) + /// .build() + /// .unwrap(); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// cached_mem.write(addr, &MAGIC_VALUE).unwrap(); + /// + /// let value: u64 = cached_mem.read(addr).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// ``` + pub fn page_size(mut self, page_size: usize) -> Self { + self.page_size = Some(page_size); + self + } + + /// Retrieves the page size for this cache from the given `Architecture`. + /// + /// The cache has to know the exact page size of the target system internally to give reasonable performance. + /// The page size can be either fetched from the `Architecture` via this method or it can be set directly + /// via the `page_size()` method of the builder. 
+ /// + /// If the page size is not set the builder will fail. + /// + /// # Examples + /// + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let mut cached_mem = CachedView::builder(virt_mem) + /// .arch(x64::ARCH) + /// .build() + /// .unwrap(); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// cached_mem.write(addr, &MAGIC_VALUE).unwrap(); + /// + /// let value: u64 = cached_mem.read(addr).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// ``` + pub fn arch(mut self, arch: impl Into) -> Self { + self.page_size = Some(arch.into().page_size()); + self + } + + /// Sets the total amount of cache to be used. + /// + /// This is the total amount of cache (in bytes) this page cache will allocate. + /// Ideally you'd want to keep this value low enough so that most of the cache stays in the lower level caches of your cpu. + /// + /// The default setting is 2 megabytes. + /// + /// This setting can drastically impact the performance of the cache. 
+ /// + /// # Examples: + /// + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::prelude::v1::*; + /// use memflow::dummy::DummyMemory; + /// # use memflow::dummy::DummyOs; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// + /// let mut cached_mem = CachedView::builder(virt_mem) + /// .arch(x64::ARCH) + /// .cache_size(size::mb(2)) + /// .build() + /// .unwrap(); + /// + /// let addr = virt_base; // some arbitrary address + /// + /// cached_mem.write(addr, &MAGIC_VALUE).unwrap(); + /// + /// let value: u64 = cached_mem.read(addr).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// ``` + pub fn cache_size(mut self, cache_size: usize) -> Self { + self.cache_size = cache_size; + self + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/cursor.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/cursor.rs new file mode 100644 index 0000000..3469cad --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/cursor.rs @@ -0,0 +1,508 @@ +//! The cursor module provides cursor implementations around +//! the [`MemoryView`] trait. +//! +//! The cursor provides the [`Read`](https://doc.rust-lang.org/std/io/trait.Read.html), +//! [`Write`](https://doc.rust-lang.org/std/io/trait.Write.html) and [`Seek`](https://doc.rust-lang.org/std/io/trait.Seek.html) traits +//! for the underlying Memory object. +//! +//! # Examples: +//! +//! ``` +//! use std::io::{self, Read, Write, Seek}; +//! +//! use memflow::dummy::DummyMemory; +//! use memflow::types::size; +//! use memflow::mem::{MemoryCursor, PhysicalMemory}; +//! +//! fn main() -> io::Result<()> { +//! 
let phys_mem = DummyMemory::new(size::mb(16)); +//! let mut cursor = MemoryCursor::new(phys_mem.into_phys_view()); +//! +//! // read up to 10 bytes +//! let mut buffer = [0; 10]; +//! cursor.read(&mut buffer)?; +//! +//! // write the previously read 10 bytes again +//! cursor.seek(io::SeekFrom::Start(0)); +//! cursor.write(&buffer)?; +//! +//! Ok(()) +//! } +//! ``` + +use std::io::{Error, ErrorKind, Read, Result, Seek, SeekFrom, Write}; + +use super::MemoryView; +use crate::types::{umem, Address}; + +/// MemoryCursor implments a Cursor around the [`MemoryView`] trait. +/// +/// The cursor provides the [`Read`](https://doc.rust-lang.org/std/io/trait.Read.html), +/// [`Write`](https://doc.rust-lang.org/std/io/trait.Write.html) and [`Seek`](https://doc.rust-lang.org/std/io/trait.Seek.html) traits +/// for the underlying [`MemoryView`] object. +/// +/// # Examples: +/// +/// ``` +/// use std::io::{self, Read, Write, Seek}; +/// +/// use memflow::dummy::{DummyOs, DummyMemory}; +/// use memflow::types::size; +/// use memflow::mem::{DirectTranslate, VirtualDma, MemoryCursor}; +/// use memflow::architecture::x86::x64; +/// +/// fn main() -> io::Result<()> { +/// // setup a pseudo virtual memory reader +/// let phys_mem = DummyMemory::new(size::mb(16)); +/// let mut os = DummyOs::new(phys_mem); +/// let (dtb, virt_base) = os.alloc_dtb(size::mb(8), &[]); +/// let phys_mem = os.into_inner(); +/// let translator = x64::new_translator(dtb); +/// +/// let virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); +/// +/// // create the actual cursor and seek it to the dummy virt_base +/// let mut cursor = MemoryCursor::new(virt_mem); +/// cursor.seek(io::SeekFrom::Start(virt_base.to_umem() as u64))?; +/// +/// // read up to 10 bytes +/// let mut buffer = [0; 10]; +/// cursor.read(&mut buffer)?; +/// +/// // write the previously read 10 bytes again +/// cursor.seek(io::SeekFrom::Start(virt_base.to_umem() as u64))?; +/// cursor.write(&buffer)?; +/// +/// Ok(()) +/// } +/// ``` 
+pub struct MemoryCursor { + mem: T, + address: Address, +} + +impl MemoryCursor { + /// Creates a new MemoryCursor by wrapping around a [`MemoryView`] object. + /// + /// Cursor initial position is `0`. + /// + /// # Examples: + /// + /// Borrowing a [`MemoryView`] object: + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut cursor = MemoryCursor::new(virt_mem); + /// ``` + /// + /// Taking (temporary) ownership of a [`MemoryView`] object: + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut cursor = MemoryCursor::new(virt_mem); + /// ``` + pub fn new(mem: T) -> Self { + Self { + mem, + address: Address::NULL, + } + } + + /// Creates a new MemoryCursor by wrapping around a [`MemoryView`] object + /// at the desired starting position. + /// + /// Cursor initial position is * `address`. 
+ /// + /// # Examples: + /// + /// Borrowing a [`MemoryView`] object: + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut cursor = MemoryCursor::at(virt_mem, 0x1000.into()); + /// ``` + /// + /// Taking (temporary) ownership of a [`MemoryView`] object: + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let virt_mem = VirtualDma::new(phys_mem, x64::ARCH, translator); + /// let mut cursor = MemoryCursor::at(virt_mem, 0x1000.into()); + /// ``` + pub fn at(mem: T, address: Address) -> Self { + Self { mem, address } + } + + /// Consumes this cursor, returning the underlying [`MemoryView`] object. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut cursor = MemoryCursor::new(VirtualDma::new(phys_mem, x64::ARCH, translator)); + /// + /// let phys_mem = cursor.into_inner(); + /// ``` + pub fn into_inner(self) -> T { + self.mem + } + + /// Gets a reference to the underlying [`MemoryView`] object in this cursor. + /// + /// # Examples + /// + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut cursor = MemoryCursor::new(VirtualDma::new(phys_mem, x64::ARCH, translator)); + /// + /// let reference = cursor.get_ref(); + /// ``` + pub fn get_ref(&self) -> &T { + &self.mem + } + + /// Gets a mutable reference to the underlying [`MemoryView`] object in this cursor. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::size; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut cursor = MemoryCursor::new(VirtualDma::new(phys_mem, x64::ARCH, translator)); + /// + /// let reference = cursor.get_mut(); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.mem + } + + /// Returns the current address of this cursor. + /// + /// # Examples + /// + /// ``` + /// use std::io::{Seek, SeekFrom}; + /// + /// use memflow::dummy::DummyMemory; + /// use memflow::types::{Address, size}; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut cursor = MemoryCursor::new(VirtualDma::new(phys_mem, x64::ARCH, translator)); + /// + /// assert_eq!(cursor.address(), Address::NULL); + /// + /// cursor.seek(SeekFrom::Current(2)).unwrap(); + /// assert_eq!(cursor.address(), Address::from(2)); + /// + /// cursor.seek(SeekFrom::Current(-1)).unwrap(); + /// assert_eq!(cursor.address(), Address::from(1)); + /// ``` + pub fn address(&self) -> Address { + self.address + } + + /// Sets the address of this cursor. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::dummy::DummyMemory; + /// use memflow::types::{Address, size}; + /// use memflow::mem::MemoryCursor; + /// # use memflow::dummy::DummyOs; + /// # use memflow::mem::{DirectTranslate, VirtualDma}; + /// # use memflow::architecture::x86::x64; + /// + /// # let phys_mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(phys_mem); + /// # let (dtb, _) = os.alloc_dtb(size::mb(8), &[]); + /// # let phys_mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let mut cursor = MemoryCursor::new(VirtualDma::new(phys_mem, x64::ARCH, translator)); + /// + /// assert_eq!(cursor.address(), Address::NULL); + /// + /// cursor.set_address(Address::from(2)); + /// assert_eq!(cursor.address(), Address::from(2)); + /// + /// cursor.set_address(Address::from(4)); + /// assert_eq!(cursor.address(), Address::from(4)); + /// ``` + pub fn set_address(&mut self, address: Address) { + self.address = address; + } +} + +impl Read for MemoryCursor { + fn read(&mut self, buf: &mut [u8]) -> Result { + self.mem + .read_raw_into(self.address, buf) + .map_err(|err| Error::new(ErrorKind::UnexpectedEof, err))?; + self.address = (self.address.to_umem() + buf.len() as umem).into(); + Ok(buf.len()) + } +} + +impl Write for MemoryCursor { + fn write(&mut self, buf: &[u8]) -> Result { + self.mem + .write_raw(self.address, buf) + .map_err(|err| Error::new(ErrorKind::UnexpectedEof, err))?; + self.address = (self.address.to_umem() + buf.len() as umem).into(); + Ok(buf.len()) + } + + fn flush(&mut self) -> Result<()> { + Ok(()) + } +} + +impl Seek for MemoryCursor { + fn seek(&mut self, pos: SeekFrom) -> Result { + let target_pos = match pos { + SeekFrom::Start(offs) => offs, + // TODO: do we need +1? 
+ SeekFrom::End(offs) => self + .mem + .metadata() + .max_address + .to_umem() + .wrapping_add(1) + .wrapping_add(offs as umem) as u64, + SeekFrom::Current(offs) => self.address.to_umem().wrapping_add(offs as umem) as u64, + }; + + self.address = target_pos.into(); + Ok(target_pos) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::architecture::x86::{x64, X86VirtualTranslate}; + use crate::dummy::{DummyMemory, DummyOs}; + use crate::mem::{DirectTranslate, PhysicalMemory, VirtualDma}; + use crate::types::{mem, size}; + + fn dummy_phys_mem() -> DummyMemory { + DummyMemory::new(size::mb(1)) + } + + #[test] + fn physical_seek() { + let mut phys_mem = dummy_phys_mem(); + let mut cursor = MemoryCursor::new(phys_mem.phys_view()); + + assert_eq!(cursor.stream_position().unwrap(), 0); + assert_eq!(cursor.seek(SeekFrom::Current(1024)).unwrap(), 1024); + assert_eq!(cursor.seek(SeekFrom::Current(1024)).unwrap(), 2048); + assert_eq!(cursor.seek(SeekFrom::Current(-1024)).unwrap(), 1024); + + assert_eq!(cursor.seek(SeekFrom::Start(512)).unwrap(), 512); + + assert_eq!( + cursor.seek(SeekFrom::End(-512)).unwrap(), + mem::mb(1) as u64 - 512 + ); + } + + #[test] + fn physical_read_write() { + let mut phys_mem = dummy_phys_mem(); + let mut cursor = MemoryCursor::new(phys_mem.phys_view()); + + let write_buf = [0xAu8, 0xB, 0xC, 0xD]; + assert_eq!(cursor.write(&write_buf).unwrap(), 4); // write 4 bytes from the start + assert_eq!(cursor.stream_position().unwrap(), 4); // check if cursor moved 4 bytes + + let mut read_buf = [0u8; 4]; + assert!(cursor.rewind().is_ok()); // roll back cursor to start + assert_eq!(cursor.read(&mut read_buf).unwrap(), 4); // read 4 bytes from the start + assert_eq!(read_buf, write_buf); // compare buffers + } + + #[test] + fn physical_read_write_seek() { + let mut phys_mem = dummy_phys_mem(); + let mut cursor = MemoryCursor::new(phys_mem.phys_view()); + + assert_eq!(cursor.seek(SeekFrom::Start(512)).unwrap(), 512); // seek to 512th byte + + let 
write_buf = [0xAu8, 0xB, 0xC, 0xD]; + assert_eq!(cursor.write(&write_buf).unwrap(), 4); // write 4 bytes from 512th byte + assert_eq!(cursor.stream_position().unwrap(), 512 + 4); // check if cursor moved 4 bytes + + let mut read_buf = [0u8; 4]; + assert_eq!(cursor.seek(SeekFrom::Start(512)).unwrap(), 512); // roll back cursor to 512th byte + assert_eq!(cursor.read(&mut read_buf).unwrap(), 4); // read 4 bytes from the 512th byte + assert_eq!(read_buf, write_buf); // compare buffers + } + + fn dummy_virt_mem() -> ( + VirtualDma, + Address, + ) { + let phys_mem = DummyMemory::new(size::mb(1)); + let mut os = DummyOs::new(phys_mem); + let (dtb, virt_base) = os.alloc_dtb(size::mb(1), &[]); + let phys_mem = os.into_inner(); + let translator = x64::new_translator(dtb); + (VirtualDma::new(phys_mem, x64::ARCH, translator), virt_base) + } + + #[test] + fn virtual_seek() { + let (virt_mem, _) = dummy_virt_mem(); + let mut cursor = MemoryCursor::new(virt_mem); + + assert_eq!(cursor.stream_position().unwrap(), 0); + assert_eq!(cursor.seek(SeekFrom::Current(1024)).unwrap(), 1024); + assert_eq!(cursor.seek(SeekFrom::Current(1024)).unwrap(), 2048); + assert_eq!(cursor.seek(SeekFrom::Current(-1024)).unwrap(), 1024); + + assert_eq!(cursor.seek(SeekFrom::Start(512)).unwrap(), 512); + } + + #[test] + fn virtual_read_write() { + let (virt_mem, virt_base) = dummy_virt_mem(); + let mut cursor = MemoryCursor::new(virt_mem); + + let write_buf = [0xAu8, 0xB, 0xC, 0xD]; + assert_eq!( + cursor + .seek(SeekFrom::Start(virt_base.to_umem() as u64)) + .unwrap(), + virt_base.to_umem() as u64 + ); + assert_eq!(cursor.write(&write_buf).unwrap(), 4); // write 4 bytes from the start + assert_eq!( + cursor.stream_position().unwrap(), + virt_base.to_umem() as u64 + 4 + ); // check if cursor moved 4 bytes + + let mut read_buf = [0u8; 4]; + assert_eq!( + cursor + .seek(SeekFrom::Start(virt_base.to_umem() as u64)) + .unwrap(), + virt_base.to_umem() as u64 + ); // roll back cursor to start + 
assert_eq!(cursor.read(&mut read_buf).unwrap(), 4); // read 4 bytes from the start + assert_eq!(read_buf, write_buf); // compare buffers + } + + #[test] + fn virtual_read_write_seek() { + let (virt_mem, virt_base) = dummy_virt_mem(); + let mut cursor = MemoryCursor::new(virt_mem); + + assert_eq!( + cursor + .seek(SeekFrom::Start(virt_base.to_umem() as u64 + 512)) + .unwrap(), + virt_base.to_umem() as u64 + 512 + ); // seek to 512th byte + + let write_buf = [0xAu8, 0xB, 0xC, 0xD]; + assert_eq!(cursor.write(&write_buf).unwrap(), 4); // write 4 bytes from 512th byte + assert_eq!( + cursor.stream_position().unwrap(), + virt_base.to_umem() as u64 + 512 + 4 + ); // check if cursor moved 4 bytes + + let mut read_buf = [0u8; 4]; + assert_eq!( + cursor + .seek(SeekFrom::Start(virt_base.to_umem() as u64 + 512)) + .unwrap(), + virt_base.to_umem() as u64 + 512 + ); // roll back cursor to 512th byte + assert_eq!(cursor.read(&mut read_buf).unwrap(), 4); // read 4 bytes from the 512th byte + assert_eq!(read_buf, write_buf); // compare buffers + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/mod.rs new file mode 100644 index 0000000..2a41c83 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/mod.rs @@ -0,0 +1,632 @@ +use std::mem::MaybeUninit; +use std::prelude::v1::*; + +use super::{mem_data::*, phys_mem::*}; +use crate::prelude::v1::{Result, *}; + +pub mod arch_overlay; +pub mod batcher; +pub mod cached_view; +pub mod remap_view; + +#[cfg(feature = "std")] +pub mod cursor; + +pub use arch_overlay::ArchOverlayView; +pub use batcher::MemoryViewBatcher; +pub use cached_view::CachedView; +pub use remap_view::RemapView; + +#[cfg(feature = "std")] +pub use cursor::MemoryCursor; + +/// The `MemoryView` trait implements generic access to memory, no matter if it is a process +/// virtual memory, or machine's physical memory. 
+/// +/// The CPU accesses virtual memory by setting the CR3 register to the appropiate Directory Table Base (DTB) +/// for that process. The ntoskrnl.exe Kernel Process has it's own DTB. +/// Using the DTB it is possible to resolve the physical memory location of a virtual address page. +/// After the address has been resolved the physical memory page can then be read or written to. +/// +/// There are 3 methods which are required to be implemented by the provider of this trait. +/// +/// # Examples +/// +/// Reading from a `MemoryView`: +/// ``` +/// use memflow::types::Address; +/// use memflow::mem::MemoryView; +/// +/// fn read(mem: &mut impl MemoryView, read_addr: Address) { +/// let mut addr = 0u64; +/// mem.read_into(read_addr, &mut addr).unwrap(); +/// println!("addr: {:x}", addr); +/// # assert_eq!(addr, 0x00ff_00ff_00ff_00ff); +/// } +/// # use memflow::dummy::{DummyMemory, DummyOs}; +/// # use memflow::os::Process; +/// # use memflow::types::size; +/// # let mut proc = DummyOs::quick_process(size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); +/// # let virt_base = proc.info().address; +/// # read(&mut proc, virt_base); +/// ``` +#[cfg_attr(feature = "plugins", cglue_trait)] +#[cglue_forward] +#[int_result(PartialResult)] +pub trait MemoryView: Send { + #[int_result] + fn read_raw_iter(&mut self, data: ReadRawMemOps) -> Result<()>; + + #[int_result] + fn write_raw_iter(&mut self, data: WriteRawMemOps) -> Result<()>; + + fn metadata(&self) -> MemoryViewMetadata; + + // Read helpers + + /// Read arbitrary amount of data. + /// + /// # Arguments + /// + /// * `inp` - input iterator of (address, buffer) pairs. + /// * `out` - optional callback for any successful reads - along the way `inp` pairs may be + /// split and only parts of the reads may succeed. This callback will return any successful + /// chunks that have their buffers filled in. 
+ /// * `out_fail` - optional callback for any unsuccessful reads - this is the opposite of + /// `out`, meaning any unsuccessful chunks with buffers in an unspecified state. + /// + /// # Examples + /// + /// ``` + /// use memflow::types::Address; + /// use memflow::mem::MemoryView; + /// use memflow::cglue::CTup2; + /// + /// fn read(mut mem: impl MemoryView, read_addrs: &[Address]) { + /// + /// let mut bufs = vec![0u8; 8 * read_addrs.len()]; + /// + /// let data = read_addrs + /// .iter() + /// .zip(bufs.chunks_mut(8)) + /// .map(|(&a, chunk)| CTup2(a, chunk.into())); + /// + /// mem.read_iter(data, None, None).unwrap(); + /// + /// println!("{:?}", bufs); + /// + /// # assert!(!bufs.chunks_exact(2).inspect(|c| println!("{:?}", c)).any(|c| c != &[255, 0])); + /// } + /// # use memflow::dummy::DummyOs; + /// # use memflow::types::size; + /// # use memflow::os::Process; + /// # let proc = DummyOs::quick_process( + /// # size::mb(2), + /// # &[255, 0].iter().cycle().copied().take(32).collect::>() + /// # ); + /// # let virt_base = proc.info().address; + /// # read(proc, &[virt_base, virt_base + 16usize]); + /// ``` + #[int_result] + #[vtbl_only] + #[custom_impl( + // Types within the C interface other than self and additional wrappers. + { + inp: CIterator>, + out: Option<&mut ReadCallback<'b, 'a>>, + out_fail: Option<&mut ReadCallback<'b, 'a>>, + }, + // Unwrapped return type + Result<()>, + // Conversion in trait impl to C arguments (signature names are expected). + {}, + // This is the body of C impl minus the automatic wrapping. + { + MemOps::with_raw( + inp.map(|CTup2(a, b)| CTup3(a, a, b)), + out, + out_fail, + |data| this.read_raw_iter(data), + ) + }, + // This part is processed in the trait impl after the call returns (impl_func_ret, + // nothing extra needs to happen here). 
+ {}, + )] + fn read_iter<'a, 'b>( + &mut self, + inp: impl Iterator>, + out: Option<&mut ReadCallback<'b, 'a>>, + out_fail: Option<&mut ReadCallback<'b, 'a>>, + ) -> Result<()> { + MemOps::with_raw( + inp.map(|CTup2(a, b)| CTup3(a, a, b)), + out, + out_fail, + |data| self.read_raw_iter(data), + ) + } + + fn read_raw_list(&mut self, data: &mut [ReadData]) -> PartialResult<()> { + let mut out = Ok(()); + + let callback = &mut |CTup2(_, mut d): ReadData| { + out = Err(PartialError::PartialVirtualRead(())); + + // Default behaviour is to zero out any failed data + for v in d.iter_mut() { + *v = 0; + } + + true + }; + + let iter = data + .iter_mut() + .map(|CTup2(d1, d2)| CTup3(*d1, *d1, d2.into())); + + MemOps::with_raw(iter, None, Some(&mut callback.into()), |data| { + self.read_raw_iter(data) + })?; + + out + } + + fn read_raw_into(&mut self, addr: Address, out: &mut [u8]) -> PartialResult<()> { + self.read_raw_list(&mut [CTup2(addr, out.into())]) + } + + #[skip_func] + fn read_raw(&mut self, addr: Address, len: usize) -> PartialResult> { + let mut buf = vec![0u8; len]; + self.read_raw_into(addr, &mut buf).map_data(|_| buf) + } + + #[skip_func] + fn read_into(&mut self, addr: Address, out: &mut T) -> PartialResult<()> + where + Self: Sized, + { + self.read_raw_into(addr, out.as_bytes_mut()) + } + + #[skip_func] + #[allow(clippy::uninit_assumed_init)] + fn read(&mut self, addr: Address) -> PartialResult + where + Self: Sized, + { + let mut obj: T = unsafe { MaybeUninit::uninit().assume_init() }; + // TODO: zero out on partial + self.read_into(addr, &mut obj).map_data(|_| obj) + } + + // TODO: allow cglue to somehow pass MaybeUninit to the IntError + #[skip_func] + fn read_addr32(&mut self, addr: Address) -> PartialResult
+ where + Self: Sized, + { + self.read::(addr).map_data(|d| d.into()) + } + + #[skip_func] + fn read_addr64(&mut self, addr: Address) -> PartialResult
+ where + Self: Sized, + { + self.read::(addr).map_data(|d| d.into()) + } + + /// Reads the specified address as a rip-relative address. + #[skip_func] + fn read_addr64_rip(&mut self, addr: Address) -> PartialResult
+ where + Self: Sized, + { + let displacement = match self.read::(addr + 0x3) { + Ok(d) => d, + Err(e) => return Err(PartialError::Error(e.into())), + }; + Ok(addr + 0x7 + displacement) + } + + #[skip_func] + fn read_addr_arch(&mut self, arch: ArchitectureObj, addr: Address) -> PartialResult
+ where + Self: Sized, + { + match arch.bits() { + 64 => self.read_addr64(addr), + 32 => self.read_addr32(addr), + _ => Err(PartialError::Error(Error( + ErrorOrigin::VirtualMemory, + ErrorKind::InvalidArchitecture, + ))), + } + } + + #[skip_func] + fn read_ptr_into( + &mut self, + ptr: Pointer, + out: &mut T, + ) -> PartialResult<()> + where + Self: Sized, + { + self.read_into(ptr.into(), out) + } + + #[skip_func] + fn read_ptr( + &mut self, + ptr: Pointer, + ) -> PartialResult + where + Self: Sized, + { + self.read(ptr.into()) + } + + // Write helpers + + /// Write arbitrary amount of data. + /// + /// # Arguments + /// + /// * `inp` - input iterator of (address, buffer) pairs. + /// * `out` - optional callback for any successful writes - along the way `inp` pairs may be + /// split and only parts of the writes may succeed. This callback will return any successful + /// chunks that have their buffers filled in. + /// * `out_fail` - optional callback for any unsuccessful writes - this is the opposite of + /// `out`, meaning any unsuccessful chunks with buffers in an unspecified state. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::types::Address; + /// use memflow::mem::MemoryView; + /// use memflow::cglue::CTup2; + /// use dataview::PodMethods; + /// + /// fn write(mut mem: impl MemoryView, writes: &[(Address, usize)]) { + /// + /// let data = writes + /// .iter() + /// .map(|(a, chunk)| CTup2(*a, chunk.as_bytes().into())); + /// + /// mem.write_iter(data, None, None).unwrap(); + /// + /// # assert_eq!(mem.read::(writes[0].0), Ok(3)); + /// # assert_eq!(mem.read::(writes[1].0), Ok(4)); + /// } + /// # use memflow::dummy::DummyOs; + /// # use memflow::types::size; + /// # use memflow::os::Process; + /// # let proc = DummyOs::quick_process( + /// # size::mb(2), + /// # &[255, 0].iter().cycle().copied().take(32).collect::>() + /// # ); + /// # let virt_base = proc.info().address; + /// # write(proc, &[(virt_base, 3), (virt_base + 16usize, 4)]); + /// ``` + #[int_result] + #[vtbl_only] + #[custom_impl( + // Types within the C interface other than self and additional wrappers. + { + inp: CIterator>, + out: Option<&mut WriteCallback<'b, 'a>>, + out_fail: Option<&mut WriteCallback<'b, 'a>>, + }, + // Unwrapped return type + Result<()>, + // Conversion in trait impl to C arguments (signature names are expected). + {}, + // This is the body of C impl minus the automatic wrapping. + { + MemOps::with_raw( + inp.map(|CTup2(a, b)| CTup3(a, a, b)), + out, + out_fail, + |data| this.write_raw_iter(data), + ) + }, + // This part is processed in the trait impl after the call returns (impl_func_ret, + // nothing extra needs to happen here). 
+ {}, + )] + fn write_iter<'a, 'b>( + &mut self, + inp: impl Iterator>, + out: Option<&mut WriteCallback<'b, 'a>>, + out_fail: Option<&mut WriteCallback<'b, 'a>>, + ) -> Result<()> { + MemOps::with_raw( + inp.map(|CTup2(a, b)| CTup3(a, a, b)), + out, + out_fail, + |data| self.write_raw_iter(data), + ) + } + + fn write_raw_list(&mut self, data: &[WriteData]) -> PartialResult<()> { + let mut out = Ok(()); + + let callback = &mut |_| { + out = Err(PartialError::PartialVirtualWrite(())); + true + }; + + let iter = data.iter().copied(); + + MemOps::with_raw(iter, None, Some(&mut callback.into()), |data| { + self.write_iter(data.inp, data.out, data.out_fail) + })?; + + out + } + + fn write_raw(&mut self, addr: Address, data: &[u8]) -> PartialResult<()> { + self.write_raw_list(&[CTup2(addr, data.into())]) + } + + #[skip_func] + fn write(&mut self, addr: Address, data: &T) -> PartialResult<()> + where + Self: Sized, + { + self.write_raw(addr, data.as_bytes()) + } + + #[skip_func] + fn write_ptr( + &mut self, + ptr: Pointer, + data: &T, + ) -> PartialResult<()> + where + Self: Sized, + { + self.write(ptr.into(), data) + } + + /// Reads a fixed length string from the target. + /// + /// # Remarks: + /// + /// The string does not have to be null-terminated. + /// If a null terminator is found the string is truncated to the terminator. + /// If no null terminator is found the resulting string is exactly `len` characters long. + #[skip_func] + fn read_char_array(&mut self, addr: Address, len: usize) -> PartialResult { + let mut buf = vec![0; len]; + self.read_raw_into(addr, &mut buf).data_part()?; + if let Some((n, _)) = buf.iter().enumerate().find(|(_, c)| **c == 0_u8) { + buf.truncate(n); + } + Ok(String::from_utf8_lossy(&buf).to_string()) + } + + /// Reads a variable length string with a length of up to specified amount from the target. 
+ /// + /// # Arguments + /// + /// * `addr` - target address to read from + /// * `n` - maximum number of bytes to read + /// + /// # Remarks: + /// + /// The string must be null-terminated. + /// If no null terminator is found then this function will return an error. + /// + /// For reading fixed-size char arrays the [`read_char_array`](Self::read_char_array) should be used. + #[skip_func] + fn read_char_string_n(&mut self, addr: Address, n: usize) -> PartialResult { + let mut buf = vec![0; std::cmp::min(32, n)]; + + let mut last_n = 0; + + loop { + let (_, right) = buf.split_at_mut(last_n); + + self.read_raw_into(addr + last_n, right).data_part()?; + if let Some((n, _)) = right.iter().enumerate().find(|(_, c)| **c == 0_u8) { + buf.truncate(last_n + n); + return Ok(String::from_utf8_lossy(&buf).to_string()); + } + if buf.len() >= n { + break; + } + last_n = buf.len(); + + buf.extend((0..buf.len()).map(|_| 0)); + } + + Err(PartialError::Error(Error( + ErrorOrigin::VirtualMemory, + ErrorKind::OutOfBounds, + ))) + } + + /// Reads a variable length string with up to 4kb length from the target. 
+ /// + /// # Arguments + /// + /// * `addr` - target address to read from + #[skip_func] + fn read_char_string(&mut self, addr: Address) -> PartialResult { + self.read_char_string_n(addr, 4096) + } + + #[cfg(feature = "std")] + #[skip_func] + fn cursor(&mut self) -> MemoryCursor> + where + Self: Sized, + { + MemoryCursor::new(self.forward()) + } + + #[cfg(feature = "std")] + #[skip_func] + fn into_cursor(self) -> MemoryCursor + where + Self: Sized, + { + MemoryCursor::new(self) + } + + #[cfg(feature = "std")] + #[skip_func] + fn cursor_at(&mut self, address: Address) -> MemoryCursor> + where + Self: Sized, + { + MemoryCursor::at(self.forward(), address) + } + + #[cfg(feature = "std")] + #[skip_func] + fn into_cursor_at(self, address: Address) -> MemoryCursor + where + Self: Sized, + { + MemoryCursor::at(self, address) + } + + #[skip_func] + fn batcher(&mut self) -> MemoryViewBatcher + where + Self: Sized, + { + MemoryViewBatcher::new(self) + } + + #[skip_func] + fn into_overlay_arch(self, arch: ArchitectureObj) -> ArchOverlayView + where + Self: Sized, + { + ArchOverlayView::new(self, arch) + } + + #[skip_func] + fn overlay_arch(&mut self, arch: ArchitectureObj) -> ArchOverlayView> + where + Self: Sized, + { + ArchOverlayView::new(self.forward_mut(), arch) + } + + #[skip_func] + fn into_overlay_arch_parts(self, arch_bits: u8, little_endian: bool) -> ArchOverlayView + where + Self: Sized, + { + ArchOverlayView::new_parts(self, arch_bits, little_endian) + } + + #[skip_func] + fn overlay_arch_parts( + &mut self, + arch_bits: u8, + little_endian: bool, + ) -> ArchOverlayView> + where + Self: Sized, + { + ArchOverlayView::new_parts(self.forward_mut(), arch_bits, little_endian) + } + + #[skip_func] + fn into_remap_view(self, mem_map: MemoryMap<(Address, umem)>) -> RemapView + where + Self: Sized, + { + RemapView::new(self, mem_map) + } + + #[skip_func] + fn remap_view(&mut self, mem_map: MemoryMap<(Address, umem)>) -> RemapView> + where + Self: Sized, + { + 
self.forward_mut().into_remap_view(mem_map) + } + + // deprecated = Expose this via cglue + #[skip_func] + fn into_phys_mem(self) -> PhysicalMemoryOnView + where + Self: Sized, + { + PhysicalMemoryOnView { mem: self } + } + + // deprecated = Expose this via cglue + #[skip_func] + fn phys_mem(&mut self) -> PhysicalMemoryOnView> + where + Self: Sized, + { + self.forward_mut().into_phys_mem() + } +} + +/// Creates a PhysicalMemory object from a MemoryView without doing any translations. +/// This function simply redirects all calls to PhysicalMemory to the underlying MemoryView +#[repr(C)] +#[derive(Clone)] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct PhysicalMemoryOnView { + mem: T, +} + +impl PhysicalMemory for PhysicalMemoryOnView +where + T: MemoryView, +{ + #[inline] + fn phys_read_raw_iter( + &mut self, + MemOps { inp, out, out_fail }: PhysicalReadMemOps, + ) -> Result<()> { + let inp = inp.map(|CTup3(addr, meta_addr, data)| CTup3(addr.into(), meta_addr, data)); + MemOps::with_raw(inp, out, out_fail, |data| self.mem.read_raw_iter(data)) + } + + #[inline] + fn phys_write_raw_iter( + &mut self, + MemOps { inp, out, out_fail }: PhysicalWriteMemOps, + ) -> Result<()> { + let inp = inp.map(|CTup3(addr, meta_addr, data)| CTup3(addr.into(), meta_addr, data)); + MemOps::with_raw(inp, out, out_fail, |data| self.mem.write_raw_iter(data)) + } + + #[inline] + fn metadata(&self) -> PhysicalMemoryMetadata { + let md = self.mem.metadata(); + + PhysicalMemoryMetadata { + max_address: md.max_address, + real_size: md.real_size, + readonly: md.readonly, + ideal_batch_size: 4096, + } + } +} + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct MemoryViewMetadata { + pub max_address: Address, + pub real_size: umem, + pub readonly: bool, + pub little_endian: bool, + pub arch_bits: u8, +} diff 
--git a/apex_dma/memflow_lib/memflow/src/mem/memory_view/remap_view.rs b/apex_dma/memflow_lib/memflow/src/mem/memory_view/remap_view.rs new file mode 100644 index 0000000..a56beb5 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/memory_view/remap_view.rs @@ -0,0 +1,82 @@ +//! Remapping layer for a memory view. +use super::*; + +/// Remapped memory view. +/// +/// This structure allows to build a new memory view as a subset of an existing view. +/// +/// This is useful for nested VM introspection, or analyzing emulators and custom memory +/// structures. +#[derive(Clone)] +pub struct RemapView { + mem: T, + mem_map: MemoryMap<(Address, umem)>, +} + +impl RemapView { + pub fn new(mem: T, mem_map: MemoryMap<(Address, umem)>) -> Self { + Self { mem, mem_map } + } +} + +impl MemoryView for RemapView { + fn read_raw_iter(&mut self, MemOps { inp, out_fail, out }: ReadRawMemOps) -> Result<()> { + let out_fail = out_fail.map(std::cell::RefCell::new); + + let mut out_fail1 = out_fail + .as_ref() + .map(|of| move |data| of.borrow_mut().call(data)); + let mut out_fail2 = out_fail + .as_ref() + .map(|of| move |data| of.borrow_mut().call(data)); + let mut out_fail2 = out_fail2.as_mut().map(<_>::into); + let out_fail2 = out_fail2.as_mut(); + + let mut out = out.map(|o| move |data| o.call(data)); + let mut out = out.as_mut().map(<_>::into); + let out = out.as_mut(); + + let mem_map = &mut self.mem_map; + let mem = &mut self.mem; + + let iter = mem_map + .map_base_iter(inp, out_fail1.as_mut()) + .map(|CTup3((a, _), m, b)| CTup3(a, m, b)); + + MemOps::with_raw(iter, out, out_fail2, |data| mem.read_raw_iter(data)) + } + + fn write_raw_iter(&mut self, MemOps { inp, out_fail, out }: WriteRawMemOps) -> Result<()> { + let out_fail = out_fail.map(std::cell::RefCell::new); + + let mut out_fail1 = out_fail + .as_ref() + .map(|of| move |data| of.borrow_mut().call(data)); + let mut out_fail2 = out_fail + .as_ref() + .map(|of| move |data| of.borrow_mut().call(data)); + let mut 
out_fail2 = out_fail2.as_mut().map(<_>::into); + let out_fail2 = out_fail2.as_mut(); + + let mut out = out.map(|o| move |data| o.call(data)); + let mut out = out.as_mut().map(<_>::into); + let out = out.as_mut(); + + let mem_map = &mut self.mem_map; + let mem = &mut self.mem; + + let iter = mem_map + .map_base_iter(inp, out_fail1.as_mut()) + .map(|CTup3((a, _), m, b)| CTup3(a, m, b)); + + MemOps::with_raw(iter, out, out_fail2, |data| mem.write_raw_iter(data)) + } + + fn metadata(&self) -> MemoryViewMetadata { + MemoryViewMetadata { + max_address: self.mem_map.max_address(), + real_size: self.mem_map.real_size(), + ..self.mem.metadata() + } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/mod.rs index f7846be..a701326 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/mod.rs @@ -1,39 +1,32 @@ -/*! -This module covers all implementations and traits related to -reading/writing [physical](phys/index.html) and [virtual](virt/index.html) memory. +//! This module covers all implementations and traits related to +//! reading/writing [physical](phys_mem/index.html) and [virtual](virt_mem/index.html) memory. +//! +//! The [cache](cache/index.html) module contains all caching related +//! implementations. The caches just wrap the physical and virtual accessors +//! and are themselves a memory backend. +//! +//! TODO: more documentation -The [cache](cache/index.html) module contains all caching related -implementations. The caches just wrap the physical and virtual accessors -and are themselves a memory backend. 
- -TODO: more documentation -*/ - -pub mod cache; +pub mod mem_data; pub mod mem_map; +pub mod memory_view; pub mod phys_mem; -pub mod phys_mem_batcher; pub mod virt_mem; -pub mod virt_mem_batcher; pub mod virt_translate; -#[cfg(any(feature = "dummy_mem", test))] -pub mod dummy; - -#[doc(hidden)] -pub use cache::*; // TODO: specify pub declarations -#[doc(hidden)] -pub use mem_map::MemoryMap; -#[doc(hidden)] -pub use phys_mem::{ - CloneablePhysicalMemory, PhysicalMemory, PhysicalMemoryBox, PhysicalMemoryMetadata, - PhysicalReadData, PhysicalReadIterator, PhysicalWriteData, PhysicalWriteIterator, +pub use mem_map::{MemoryMap, PhysicalMemoryMapping}; +pub use phys_mem::{CachedPhysicalMemory, PhysicalMemory, PhysicalMemoryMetadata}; +#[cfg(feature = "std")] +pub use phys_mem::{DelayedPhysicalMemory, PhysicalMemoryMetrics}; +pub use virt_mem::VirtualDma; +pub use virt_translate::{ + CachedVirtualTranslate, DirectTranslate, VirtualTranslate, VirtualTranslate2, + VirtualTranslate3, VtopFailureCallback, VtopOutputCallback, }; -#[doc(hidden)] -pub use phys_mem_batcher::PhysicalMemoryBatcher; -#[doc(hidden)] -pub use virt_mem::{VirtualDMA, VirtualMemory, VirtualReadData, VirtualWriteData}; -#[doc(hidden)] -pub use virt_mem_batcher::VirtualMemoryBatcher; -#[doc(hidden)] -pub use virt_translate::{DirectTranslate, VirtualTranslate}; + +pub use memory_view::{CachedView, MemoryView, MemoryViewBatcher, MemoryViewMetadata}; + +#[cfg(feature = "std")] +pub use memory_view::MemoryCursor; + +pub use mem_data::*; diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem.rs deleted file mode 100644 index a05d632..0000000 --- a/apex_dma/memflow_lib/memflow/src/mem/phys_mem.rs +++ /dev/null @@ -1,249 +0,0 @@ -use std::prelude::v1::*; - -use super::PhysicalMemoryBatcher; -use crate::error::Result; -use crate::types::PhysicalAddress; - -use std::mem::MaybeUninit; - -use dataview::Pod; - -// TODO: -// - check endianess here and return an 
error -// - better would be to convert endianess with word alignment from addr - -/// The `PhysicalMemory` trait is implemented by memory backends -/// and provides a generic way to read and write from/to physical memory. -/// -/// All addresses are of the type [`PhysicalAddress`](../types/physical_address/index.html) -/// and can contain additional information about the page the address resides in. -/// This information is usually only needed when implementing caches. -/// -/// There are only 2 methods which are required to be implemented by the provider of this trait. -/// -/// # Examples -/// -/// Implementing `PhysicalMemory` for a memory backend: -/// ``` -/// use std::vec::Vec; -/// -/// use memflow::mem::{ -/// PhysicalMemory, -/// PhysicalReadData, -/// PhysicalWriteData, -/// PhysicalMemoryMetadata -/// }; -/// -/// use memflow::types::PhysicalAddress; -/// use memflow::error::Result; -/// -/// pub struct MemoryBackend { -/// mem: Box<[u8]>, -/// } -/// -/// impl PhysicalMemory for MemoryBackend { -/// fn phys_read_raw_list( -/// &mut self, -/// data: &mut [PhysicalReadData] -/// ) -> Result<()> { -/// data -/// .iter_mut() -/// .for_each(|PhysicalReadData(addr, out)| out -/// .copy_from_slice(&self.mem[addr.as_usize()..(addr.as_usize() + out.len())]) -/// ); -/// Ok(()) -/// } -/// -/// fn phys_write_raw_list( -/// &mut self, -/// data: &[PhysicalWriteData] -/// ) -> Result<()> { -/// data -/// .iter() -/// .for_each(|PhysicalWriteData(addr, data)| self -/// .mem[addr.as_usize()..(addr.as_usize() + data.len())].copy_from_slice(data) -/// ); -/// Ok(()) -/// } -/// -/// fn metadata(&self) -> PhysicalMemoryMetadata { -/// PhysicalMemoryMetadata { -/// size: self.mem.len(), -/// readonly: false -/// } -/// } -/// } -/// ``` -/// -/// Reading from `PhysicalMemory`: -/// ``` -/// use memflow::types::Address; -/// use memflow::mem::PhysicalMemory; -/// -/// fn read(mem: &mut T) { -/// let mut addr = 0u64; -/// mem.phys_read_into(Address::from(0x1000).into(), 
&mut addr).unwrap(); -/// println!("addr: {:x}", addr); -/// } -/// -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # read(&mut DummyMemory::new(size::mb(4))); -/// ``` -pub trait PhysicalMemory -where - Self: Send, -{ - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()>; - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()>; - - /// Retrieve metadata about the physical memory - /// - /// This function will return metadata about the underlying physical memory object, currently - /// including address space size and read-only status. - /// - /// # Examples - /// - /// ``` - /// use memflow::types::size; - /// use memflow::mem::PhysicalMemory; - /// # let mem = memflow::mem::dummy::DummyMemory::new(size::mb(16)); - /// - /// let metadata = mem.metadata(); - /// - /// assert_eq!(metadata.size, size::mb(16)); - /// assert_eq!(metadata.readonly, false); - /// ``` - fn metadata(&self) -> PhysicalMemoryMetadata; - - // read helpers - fn phys_read_raw_into(&mut self, addr: PhysicalAddress, out: &mut [u8]) -> Result<()> { - self.phys_read_raw_list(&mut [PhysicalReadData(addr, out)]) - } - - fn phys_read_into(&mut self, addr: PhysicalAddress, out: &mut T) -> Result<()> - where - Self: Sized, - { - self.phys_read_raw_into(addr, out.as_bytes_mut()) - } - - fn phys_read_raw(&mut self, addr: PhysicalAddress, len: usize) -> Result> { - let mut buf = vec![0u8; len]; - self.phys_read_raw_into(addr, &mut *buf)?; - Ok(buf) - } - - /// # Safety - /// - /// this function will overwrite the contents of 'obj' so we can just allocate an unitialized memory section. - /// this function should only be used with [repr(C)] structs. 
- #[allow(clippy::uninit_assumed_init)] - fn phys_read(&mut self, addr: PhysicalAddress) -> Result - where - Self: Sized, - { - let mut obj: T = unsafe { MaybeUninit::uninit().assume_init() }; - self.phys_read_into(addr, &mut obj)?; - Ok(obj) - } - - // write helpers - fn phys_write_raw(&mut self, addr: PhysicalAddress, data: &[u8]) -> Result<()> { - self.phys_write_raw_list(&[PhysicalWriteData(addr, data)]) - } - - fn phys_write(&mut self, addr: PhysicalAddress, data: &T) -> Result<()> - where - Self: Sized, - { - self.phys_write_raw(addr, data.as_bytes()) - } - - fn phys_batcher(&mut self) -> PhysicalMemoryBatcher - where - Self: Sized, - { - PhysicalMemoryBatcher::new(self) - } -} - -// forward impls -impl + Send> PhysicalMemory for P { - #[inline] - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { - (**self).phys_read_raw_list(data) - } - - #[inline] - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { - (**self).phys_write_raw_list(data) - } - - #[inline] - fn metadata(&self) -> PhysicalMemoryMetadata { - (**self).metadata() - } -} - -/// Wrapper trait around physical memory which implements a boxed clone -pub trait CloneablePhysicalMemory: PhysicalMemory { - fn clone_box(&self) -> Box; - fn downcast(&mut self) -> &mut dyn PhysicalMemory; -} - -/// A sized Box containing a CloneablePhysicalMemory -pub type PhysicalMemoryBox = Box; - -/// Forward implementation of CloneablePhysicalMemory for every Cloneable backend. 
-impl CloneablePhysicalMemory for T -where - T: PhysicalMemory + Clone + 'static, -{ - fn clone_box(&self) -> PhysicalMemoryBox { - Box::new(self.clone()) - } - - fn downcast(&mut self) -> &mut dyn PhysicalMemory { - self - } -} - -/// Clone forward implementation for a PhysicalMemory Box -impl Clone for PhysicalMemoryBox { - fn clone(&self) -> Self { - (**self).clone_box() - } -} - -#[derive(Debug, Clone, Copy)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] -#[repr(C)] -pub struct PhysicalMemoryMetadata { - pub size: usize, - pub readonly: bool, -} - -// iterator helpers -#[repr(C)] -pub struct PhysicalReadData<'a>(pub PhysicalAddress, pub &'a mut [u8]); -pub trait PhysicalReadIterator<'a>: Iterator> + 'a {} -impl<'a, T: Iterator> + 'a> PhysicalReadIterator<'a> for T {} - -impl<'a> From> for (PhysicalAddress, &'a mut [u8]) { - fn from(PhysicalReadData(a, b): PhysicalReadData<'a>) -> Self { - (a, b) - } -} - -#[repr(C)] -#[derive(Clone, Copy)] -pub struct PhysicalWriteData<'a>(pub PhysicalAddress, pub &'a [u8]); -pub trait PhysicalWriteIterator<'a>: Iterator> + 'a {} -impl<'a, T: Iterator> + 'a> PhysicalWriteIterator<'a> for T {} - -impl<'a> From> for (PhysicalAddress, &'a [u8]) { - fn from(PhysicalWriteData(a, b): PhysicalWriteData<'a>) -> Self { - (a, b) - } -} diff --git a/apex_dma/memflow_lib/memflow/src/mem/cache/cached_memory_access.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/mod.rs similarity index 59% rename from apex_dma/memflow_lib/memflow/src/mem/cache/cached_memory_access.rs rename to apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/mod.rs index 92958a4..a6208bc 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/cached_memory_access.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/mod.rs @@ -1,58 +1,63 @@ -/*! -This cache is a wrapper for connector objects that implement the `PhysicalMemory` trait. 
-It enables a configurable caching layer when accessing physical pages. +//! This cache is a wrapper for connector objects that implement the [`PhysicalMemory`] trait. +//! It enables a configurable caching layer when accessing physical pages. +//! +//! Each page that is being read by the the connector will be placed into a `PageCache` object. +//! If the cache is still valid then for consecutive reads this connector will just return the values from the cache +//! and not issue out a new read. In case the cache is not valid anymore it will do a new read. +//! +//! The cache time is determined by the customizable cache validator. +//! The cache validator has to implement the [`CacheValidator`](../trait.CacheValidator.html) trait. +//! +//! To make it easier and quicker to construct and work with caches this module also contains a cache builder. +//! +//! More examples can be found in the documentations for each of the structs in this module. +//! +//! # Examples +//! +//! Building a simple cache with default settings: +//! ``` +//! use memflow::architecture::x86::x64; +//! use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; +//! use memflow::types::size; +//! +//! fn build(mem: T) { +//! let cache = CachedPhysicalMemory::builder(mem) +//! .arch(x64::ARCH) +//! .cache_size(size::mb(1)) +//! .build() +//! .unwrap(); +//! } +//! ``` -Each page that is being read by the the connector will be placed into a `PageCache` object. -If the cache is still valid then for consecutive reads this connector will just return the values from the cache -and not issue out a new read. In case the cache is not valid anymore it will do a new read. +pub(crate) mod page_cache; -The cache time is determined by the customizable cache validator. -The cache validator has to implement the [`CacheValidator`](../trait.CacheValidator.html) trait. - -To make it easier and quicker to construct and work with caches this module also contains a cache builder. 
- -More examples can be found in the documentations for each of the structs in this module. - -# Examples - -Building a simple cache with default settings: -``` -use memflow::architecture::x86::x64; -use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; - -fn build(mem: T) { - let cache = CachedMemoryAccess::builder(mem) - .arch(x64::ARCH) - .build() - .unwrap(); -} -``` -*/ - -use super::{ - page_cache::PageCache, page_cache::PageValidity, CacheValidator, DefaultCacheValidator, -}; use crate::architecture::ArchitectureObj; -use crate::error::Result; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; use crate::iter::PageChunks; -use crate::mem::phys_mem::{ - PhysicalMemory, PhysicalMemoryMetadata, PhysicalReadData, PhysicalWriteData, +use crate::mem::{ + MemOps, PhysicalMemory, PhysicalMemoryMapping, PhysicalMemoryMetadata, PhysicalReadMemOps, + PhysicalWriteMemOps, }; +use cglue::tuple::*; +use page_cache::{PageCache, PageValidity}; + +use crate::types::cache::{CacheValidator, DefaultCacheValidator}; + use crate::types::{size, PageType}; use bumpalo::Bump; /// The cache object that can use as a drop-in replacement for any Connector. /// -/// Since this cache implements `PhysicalMemory` it can be used as a replacement -/// in all structs and functions that require a `PhysicalMemory` object. -pub struct CachedMemoryAccess<'a, T, Q> { +/// Since this cache implements [`PhysicalMemory`] it can be used as a replacement +/// in all structs and functions that require a [`PhysicalMemory`] object. 
+pub struct CachedPhysicalMemory<'a, T, Q> { mem: T, cache: PageCache<'a, Q>, arena: Bump, } -impl<'a, T, Q> Clone for CachedMemoryAccess<'a, T, Q> +impl<'a, T, Q> Clone for CachedPhysicalMemory<'a, T, Q> where T: Clone, Q: CacheValidator + Clone, @@ -66,12 +71,12 @@ where } } -impl<'a, T: PhysicalMemory, Q: CacheValidator> CachedMemoryAccess<'a, T, Q> { +impl<'a, T: PhysicalMemory, Q: CacheValidator> CachedPhysicalMemory<'a, T, Q> { /// Constructs a new cache based on the given `PageCache`. /// /// This function is used when manually constructing a cache inside of the memflow crate itself. /// - /// For general usage it is advised to just use the [builder](struct.CachedMemoryAccessBuilder.html) + /// For general usage it is advised to just use the [builder](struct.CachedPhysicalMemoryBuilder.html) /// to construct the cache. pub fn new(mem: T, cache: PageCache<'a, Q>) -> Self { Self { @@ -91,78 +96,91 @@ impl<'a, T: PhysicalMemory, Q: CacheValidator> CachedMemoryAccess<'a, T, Q> { /// ``` /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory, MemoryView}; /// /// fn build(mem: T) -> T { - /// let mut cache = CachedMemoryAccess::builder(mem) + /// let mut cache = CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) /// .build() /// .unwrap(); /// /// // use the cache... 
- /// let value: u64 = cache.phys_read(0.into()).unwrap(); + /// let value: u64 = cache.phys_view().read(0.into()).unwrap(); /// assert_eq!(value, MAGIC_VALUE); /// /// // retrieve ownership of mem and return it back - /// cache.destroy() + /// cache.into_inner() /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # mem.phys_write(0.into(), &MAGIC_VALUE).unwrap(); /// # build(mem); /// ``` - pub fn destroy(self) -> T { + pub fn into_inner(self) -> T { self.mem } } -impl<'a, T: PhysicalMemory> CachedMemoryAccess<'a, T, DefaultCacheValidator> { +impl<'a, T: PhysicalMemory> CachedPhysicalMemory<'a, T, DefaultCacheValidator> { /// Returns a new builder for this cache with default settings. - pub fn builder(mem: T) -> CachedMemoryAccessBuilder { - CachedMemoryAccessBuilder::new(mem) + pub fn builder(mem: T) -> CachedPhysicalMemoryBuilder { + CachedPhysicalMemoryBuilder::new(mem) } } // forward PhysicalMemory trait fncs -impl<'a, T: PhysicalMemory, Q: CacheValidator> PhysicalMemory for CachedMemoryAccess<'a, T, Q> { - fn phys_read_raw_list(&mut self, data: &mut [PhysicalReadData]) -> Result<()> { +impl<'a, T: PhysicalMemory, Q: CacheValidator> PhysicalMemory for CachedPhysicalMemory<'a, T, Q> { + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { self.cache.validator.update_validity(); self.arena.reset(); self.cache.cached_read(&mut self.mem, data, &self.arena) } - fn phys_write_raw_list(&mut self, data: &[PhysicalWriteData]) -> Result<()> { + fn phys_write_raw_iter( + &mut self, + //data: PhysicalWriteMemOps, + MemOps { inp, out, out_fail }: PhysicalWriteMemOps, + ) -> Result<()> { self.cache.validator.update_validity(); - let cache = &mut self.cache; let mem = &mut self.mem; + let cache = &mut self.cache; - data.iter().for_each(move |PhysicalWriteData(addr, data)| { + let inp = inp.map(move |CTup3(addr, meta_addr, 
data)| { if cache.is_cached_page_type(addr.page_type()) { for (paddr, data_chunk) in data.page_chunks(addr.address(), cache.page_size()) { let mut cached_page = cache.cached_page_mut(paddr, false); if let PageValidity::Valid(buf) = &mut cached_page.validity { // write-back into still valid cache pages - let start = paddr - cached_page.address; - buf[start..(start + data_chunk.len())].copy_from_slice(data_chunk); + let start = (paddr - cached_page.address) as usize; + buf[start..(start + data_chunk.len())].copy_from_slice(data_chunk.into()); } cache.put_entry(cached_page); } } + CTup3(addr, meta_addr, data) }); - mem.phys_write_raw_list(data) + MemOps::with_raw(inp, out, out_fail, move |data| { + mem.phys_write_raw_iter(data) + }) } + #[inline] fn metadata(&self) -> PhysicalMemoryMetadata { self.mem.metadata() } + + #[inline] + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.mem.set_mem_map(mem_map) + } } -/// The builder interface for constructing a `CachedMemoryAccess` object. -pub struct CachedMemoryAccessBuilder { +/// The builder interface for constructing a `CachedPhysicalMemory` object. +pub struct CachedPhysicalMemoryBuilder { mem: T, validator: Q, page_size: Option, @@ -170,40 +188,41 @@ pub struct CachedMemoryAccessBuilder { page_type_mask: PageType, } -impl CachedMemoryAccessBuilder { - /// Creates a new `CachedMemoryAccess` builder. - /// The memory object is mandatory as the CachedMemoryAccess struct wraps around it. +impl CachedPhysicalMemoryBuilder { + /// Creates a new [`CachedPhysicalMemory`] builder. + /// The memory object is mandatory as the [`CachedPhysicalMemory`] struct wraps around it. /// /// This type of cache also is required to know the exact page size of the target system. /// This can either be set directly via the `page_size()` method or via the `arch()` method. - /// If no page size has been set this builder will fail to build the CachedMemoryAccess. 
+ /// If no page size has been set this builder will fail to build the [`CachedPhysicalMemory`]. /// /// Without further adjustments this function creates a cache that is 2 megabytes in size and caches /// pages that contain pagetable entries as well as read-only pages. /// - /// It is also possible to either let the `CachedMemoryAccess` object own or just borrow the underlying memory object. + /// It is also possible to either let the `[`CachedPhysicalMemory`]` object own or just borrow the underlying memory object. /// /// # Examples /// Moves ownership of a mem object and retrieves it back: /// ``` /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory, MemoryView}; /// /// fn build(mem: T) { - /// let mut cache = CachedMemoryAccess::builder(mem) + /// let mut cache = CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) + /// .cache_size(size::mb(1)) /// .build() /// .unwrap(); /// /// cache.phys_write(0.into(), &MAGIC_VALUE); /// - /// let mut mem = cache.destroy(); + /// let mut mem = cache.into_inner(); /// - /// let value: u64 = mem.phys_read(0.into()).unwrap(); + /// let value: u64 = mem.phys_view().read(0.into()).unwrap(); /// assert_eq!(value, MAGIC_VALUE); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # mem.phys_write(0.into(), &0xffaaffaau64).unwrap(); @@ -214,23 +233,24 @@ impl CachedMemoryAccessBuilder { /// ``` /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory, MemoryView}; + /// use memflow::cglue::{Fwd, ForwardMut}; /// - /// fn build(mem: &mut T) + /// fn build(mem: Fwd<&mut T>) /// -> 
impl PhysicalMemory + '_ { - /// CachedMemoryAccess::builder(mem) + /// CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) /// .build() /// .unwrap() /// } /// - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # mem.phys_write(0.into(), &MAGIC_VALUE).unwrap(); - /// let mut cache = build(&mut mem); + /// let mut cache = build(mem.forward_mut()); /// - /// let value: u64 = cache.phys_read(0.into()).unwrap(); + /// let value: u64 = cache.phys_view().read(0.into()).unwrap(); /// assert_eq!(value, MAGIC_VALUE); /// /// cache.phys_write(0.into(), &0u64).unwrap(); @@ -238,7 +258,7 @@ impl CachedMemoryAccessBuilder { /// // We drop the cache and are able to use mem again /// std::mem::drop(cache); /// - /// let value: u64 = mem.phys_read(0.into()).unwrap(); + /// let value: u64 = mem.phys_view().read(0.into()).unwrap(); /// assert_ne!(value, MAGIC_VALUE); /// ``` pub fn new(mem: T) -> Self { @@ -252,13 +272,16 @@ impl CachedMemoryAccessBuilder { } } -impl CachedMemoryAccessBuilder { - /// Builds the `CachedMemoryAccess` object or returns an error if the page size is not set. - pub fn build<'a>(self) -> Result> { - Ok(CachedMemoryAccess::new( +impl CachedPhysicalMemoryBuilder { + /// Builds the [`CachedPhysicalMemory`] object or returns an error if the page size is not set. + pub fn build<'a>(self) -> Result> { + Ok(CachedPhysicalMemory::new( self.mem, PageCache::with_page_size( - self.page_size.ok_or("page_size must be initialized")?, + self.page_size.ok_or_else(|| { + Error(ErrorOrigin::Cache, ErrorKind::Uninitialized) + .log_error("page_size must be initialized") + })?, self.cache_size, self.page_type_mask, self.validator, @@ -268,8 +291,9 @@ impl CachedMemoryAccessBuilder { /// Sets a custom validator for the cache. 
/// - /// If this function is not called it will default to a [`DefaultCacheValidator`](../timed_validator/index.html) - /// for std builds and a /* TODO */ validator for no_std builds. + /// If this function is not called it will default to a [`DefaultCacheValidator`]. + /// The default validator for std builds is the [`TimedCacheValidator`]. + /// The default validator for no_std builds is the [`CountCacheValidator`]. /// /// The default setting is `DefaultCacheValidator::default()`. /// @@ -279,22 +303,26 @@ impl CachedMemoryAccessBuilder { /// use std::time::Duration; /// /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess, DefaultCacheValidator}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; + /// use memflow::types::DefaultCacheValidator; /// /// fn build(mem: T) { - /// let cache = CachedMemoryAccess::builder(mem) + /// let cache = CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) /// .validator(DefaultCacheValidator::new(Duration::from_millis(2000).into())) /// .build() /// .unwrap(); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # build(mem); /// ``` - pub fn validator(self, validator: QN) -> CachedMemoryAccessBuilder { - CachedMemoryAccessBuilder { + pub fn validator( + self, + validator: QN, + ) -> CachedPhysicalMemoryBuilder { + CachedPhysicalMemoryBuilder { mem: self.mem, validator, page_size: self.page_size, @@ -315,15 +343,15 @@ impl CachedMemoryAccessBuilder { /// /// ``` /// use memflow::types::size; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; /// /// fn build(mem: T) { - /// let cache = CachedMemoryAccess::builder(mem) + /// let cache = CachedPhysicalMemory::builder(mem) /// .page_size(size::kb(4)) /// .build() /// .unwrap(); /// } - /// # use 
memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # build(mem); /// ``` @@ -332,10 +360,10 @@ impl CachedMemoryAccessBuilder { self } - /// Retrieves the page size for this cache from the given `Architecture`. + /// Retrieves the page size for this cache from the given [`Architecture`]. /// /// The cache has to know the exact page size of the target system internally to give reasonable performance. - /// The page size can be either fetched from the `Architecture` via this method or it can be set directly + /// The page size can be either fetched from the [`Architecture`] via this method or it can be set directly /// via the `page_size()` method of the builder. /// /// If the page size is not set the builder will fail. @@ -344,21 +372,21 @@ impl CachedMemoryAccessBuilder { /// /// ``` /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; /// /// fn build(mem: T) { - /// let cache = CachedMemoryAccess::builder(mem) + /// let cache = CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) /// .build() /// .unwrap(); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # build(mem); /// ``` - pub fn arch(mut self, arch: ArchitectureObj) -> Self { - self.page_size = Some(arch.page_size()); + pub fn arch(mut self, arch: impl Into) -> Self { + self.page_size = Some(arch.into().page_size()); self } @@ -376,16 +404,16 @@ impl CachedMemoryAccessBuilder { /// ``` /// use memflow::types::size; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; /// /// fn build(mem: T) { - /// let cache = CachedMemoryAccess::builder(mem) + /// let cache = 
CachedPhysicalMemory::builder(mem) /// .arch(x64::ARCH) /// .cache_size(size::mb(2)) /// .build() /// .unwrap(); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # build(mem); /// ``` @@ -409,16 +437,16 @@ impl CachedMemoryAccessBuilder { /// ``` /// use memflow::types::PageType; /// use memflow::architecture::x86::x32; - /// use memflow::mem::{PhysicalMemory, CachedMemoryAccess}; + /// use memflow::mem::{PhysicalMemory, CachedPhysicalMemory}; /// /// fn build(mem: T) { - /// let cache = CachedMemoryAccess::builder(mem) + /// let cache = CachedPhysicalMemory::builder(mem) /// .arch(x32::ARCH) /// .page_type_mask(PageType::PAGE_TABLE | PageType::READ_ONLY) /// .build() /// .unwrap(); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::DummyMemory; /// # use memflow::types::size; /// # let mut mem = DummyMemory::new(size::mb(4)); /// # build(mem); @@ -428,3 +456,10 @@ impl CachedMemoryAccessBuilder { self } } + +#[cfg(feature = "plugins")] +cglue::cglue_impl_group!( + CachedPhysicalMemory<'cglue_a, T: PhysicalMemory, Q: CacheValidator>, + crate::plugins::ConnectorInstance, + {} +); diff --git a/apex_dma/memflow_lib/memflow/src/mem/cache/page_cache.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/page_cache.rs similarity index 67% rename from apex_dma/memflow_lib/memflow/src/mem/cache/page_cache.rs rename to apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/page_cache.rs index 5f245a8..b7cf730 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/page_cache.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/cache/page_cache.rs @@ -1,14 +1,14 @@ -use std::prelude::v1::*; - -use super::{CacheValidator, PageType}; use crate::architecture::ArchitectureObj; use crate::error::Result; use crate::iter::PageChunks; -use crate::mem::phys_mem::{PhysicalMemory, PhysicalReadData, PhysicalReadIterator}; -use 
crate::types::{Address, PhysicalAddress}; -use bumpalo::{collections::Vec as BumpVec, Bump}; +use crate::mem::mem_data::*; +use crate::mem::phys_mem::*; +use crate::types::{cache::CacheValidator, umem, Address, PageType, PhysicalAddress}; + use std::alloc::{alloc, alloc_zeroed, dealloc, Layout}; +use bumpalo::{collections::Vec as BumpVec, Bump}; + pub enum PageValidity<'a> { Invalid, Validatable(&'a mut [u8]), @@ -40,6 +40,7 @@ pub struct PageCache<'a, T> { unsafe impl<'a, T> Send for PageCache<'a, T> {} +#[allow(clippy::needless_option_as_deref)] impl<'a, T: CacheValidator> PageCache<'a, T> { pub fn new(arch: ArchitectureObj, size: usize, page_type_mask: PageType, validator: T) -> Self { Self::with_page_size(arch.page_size(), size, page_type_mask, validator) @@ -82,13 +83,14 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { } fn page_index(&self, addr: Address) -> usize { - (addr.as_page_aligned(self.page_size).as_usize() / self.page_size) % self.address.len() + ((addr.as_page_aligned(self.page_size).to_umem() / self.page_size as umem) + % (self.address.len() as umem)) as usize } fn take_page(&mut self, addr: Address, skip_validator: bool) -> PageValidity<'a> { let page_index = self.page_index(addr); - let bufopt = std::mem::replace(&mut self.page_refs[page_index], None); + let bufopt = self.page_refs[page_index].take(); if let Some(buf) = bufopt { if self.address[page_index] == addr.as_page_aligned(self.page_size) @@ -148,6 +150,16 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { self.address_once_validated[idx] = aligned_addr; } + pub fn cancel_page_validation(&mut self, addr: Address, page_buf: &'a mut [u8]) { + let idx = self.page_index(addr); + // We could leave it in previous validity state, + // but the buffer could have been partially written... 
+ if self.address_once_validated[idx] == addr { + self.invalidate_page_raw(addr); + self.put_page(addr, page_buf); + } + } + pub fn validate_page(&mut self, addr: Address, page_buf: &'a mut [u8]) { let idx = self.page_index(addr); self.address[idx] = addr; @@ -156,54 +168,65 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { self.put_page(addr, page_buf); } + pub fn invalidate_page_raw(&mut self, addr: Address) { + let idx = self.page_index(addr); + self.validator.invalidate_slot(idx); + self.address[idx] = Address::INVALID; + self.address_once_validated[idx] = Address::INVALID; + } + pub fn invalidate_page(&mut self, addr: Address, page_type: PageType) { if self.page_type_mask.contains(page_type) { - let idx = self.page_index(addr); - self.validator.invalidate_slot(idx); - self.address[idx] = Address::INVALID; - self.address_once_validated[idx] = Address::INVALID; + self.invalidate_page_raw(addr) } } pub fn split_to_chunks( - PhysicalReadData(addr, out): PhysicalReadData<'_>, + CTup3(addr, meta_addr, out): PhysicalReadData<'_>, page_size: usize, ) -> impl PhysicalReadIterator<'_> { - out.page_chunks(addr.address(), page_size) - .map(move |(paddr, chunk)| { - PhysicalReadData( - PhysicalAddress::with_page(paddr, addr.page_type(), addr.page_size()), + (meta_addr, out).page_chunks(addr.address(), page_size).map( + move |(paddr, (meta_addr, chunk))| { + CTup3( + PhysicalAddress::with_page(paddr, addr.page_type(), addr.page_size() as umem), + meta_addr, chunk, ) - }) + }, + ) } - pub fn cached_read( + // TODO: do this properly + pub fn cached_read<'b, F: PhysicalMemory>( &mut self, mem: &mut F, - data: &mut [PhysicalReadData], - arena: &Bump, + MemOps { + inp: mut iter, + out: mut cb_out, + out_fail: mut cb_fail, + }: PhysicalReadMemOps, + arena: &'b Bump, ) -> Result<()> { let page_size = self.page_size; - let mut iter = data.iter_mut(); - { let mut next = iter.next(); let mut clist = BumpVec::new_in(arena); let mut wlist = BumpVec::new_in(arena); let mut wlistcache = 
BumpVec::new_in(arena); - while let Some(PhysicalReadData(addr, out)) = next { + while let Some(CTup3(addr, meta_addr, out)) = next { if self.is_cached_page_type(addr.page_type()) { - out.page_chunks(addr.address(), page_size) - .for_each(|(paddr, chunk)| { - let prd = PhysicalReadData( + (meta_addr, out) + .page_chunks(addr.address(), page_size) + .for_each(|(paddr, (meta_addr, chunk))| { + let mut prd = CTup3( PhysicalAddress::with_page( paddr, addr.page_type(), - addr.page_size(), + addr.page_size() as umem, ), + meta_addr, chunk, ); @@ -213,16 +236,21 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { PageValidity::Valid(buf) => { let aligned_addr = paddr.as_page_aligned(self.page_size); let start = paddr - aligned_addr; - let cached_buf = - buf.split_at_mut(start).1.split_at_mut(prd.1.len()).0; - prd.1.copy_from_slice(cached_buf); + let cached_buf = buf + .split_at_mut(start as usize) + .1 + .split_at_mut(prd.2.len()) + .0; + prd.2.copy_from_slice(cached_buf); + opt_call(cb_out.as_deref_mut(), CTup2(prd.1, prd.2)); self.put_page(cached_page.address, buf); } PageValidity::Validatable(buf) => { clist.push(prd); - wlistcache.push(PhysicalReadData( + wlistcache.push(CTup3( PhysicalAddress::from(cached_page.address), - buf, + meta_addr, + buf.into(), )); self.mark_page_for_validation(cached_page.address); } @@ -235,7 +263,7 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { } }); } else { - wlist.push(PhysicalReadData(*addr, out)); + wlist.push(CTup3(addr, meta_addr, out)); } next = iter.next(); @@ -246,32 +274,58 @@ impl<'a, T: CacheValidator> PageCache<'a, T> { || clist.len() >= 64 { if !wlist.is_empty() { - mem.phys_read_raw_list(&mut wlist)?; + { + let mut drain = wlist.drain(..); + mem.phys_read_raw_iter(MemOps { + inp: (&mut drain).into(), + out_fail: cb_fail.as_deref_mut(), + out: cb_out.as_deref_mut(), + })?; + } wlist.clear(); } if !wlistcache.is_empty() { - mem.phys_read_raw_list(&mut wlistcache)?; - - wlistcache - .into_iter() - 
.for_each(|PhysicalReadData(addr, buf)| { - self.validate_page(addr.address(), buf) - }); + let mut iter = wlistcache.iter_mut().map( + |CTup3(addr, _, buf): &mut PhysicalReadData| { + CTup3(*addr, addr.address(), buf.into()) + }, + ); + + let callback = &mut |CTup2(addr, buf): ReadData<'a>| { + self.validate_page(addr, buf.into()); + true + }; + + let mut callback = callback.into(); + + mem.phys_read_raw_iter(MemOps { + inp: (&mut iter).into(), + out: Some(&mut callback), + out_fail: None, + })?; + + wlistcache.into_iter().for_each(|CTup3(addr, _, buf)| { + self.cancel_page_validation(addr.address(), buf.into()); + }); wlistcache = BumpVec::new_in(arena); } - while let Some(PhysicalReadData(addr, out)) = clist.pop() { + while let Some(CTup3(addr, meta_addr, mut out)) = clist.pop() { let cached_page = self.cached_page_mut(addr.address(), false); let aligned_addr = cached_page.address.as_page_aligned(self.page_size); let start = addr.address() - aligned_addr; if let PageValidity::Valid(buf) = cached_page.validity { - let cached_buf = buf.split_at_mut(start).1.split_at_mut(out.len()).0; + let cached_buf = + buf.split_at_mut(start as usize).1.split_at_mut(out.len()).0; out.copy_from_slice(cached_buf); self.put_page(cached_page.address, buf); + opt_call(cb_out.as_deref_mut(), CTup2(meta_addr, out)); + } else { + opt_call(cb_fail.as_deref_mut(), CTup2(meta_addr, out)); } } } @@ -336,9 +390,10 @@ impl<'a, T> Drop for PageCache<'a, T> { mod tests { use super::*; use crate::architecture::x86; - use crate::mem::{dummy::DummyMemory, CachedMemoryAccess, TimedCacheValidator}; - use crate::mem::{VirtualDMA, VirtualMemory}; - use crate::types::{size, Address, PhysicalAddress}; + use crate::cglue::ForwardMut; + use crate::dummy::{DummyMemory, DummyOs}; + use crate::mem::{CachedPhysicalMemory, MemoryView, VirtualDma}; + use crate::types::{cache::TimedCacheValidator, size, Address, PhysicalAddress}; use coarsetime::Duration; use rand::{thread_rng, Rng}; @@ -373,15 +428,16 @@ mod 
tests { #[test] fn cloned_validity() { - let mut mem = DummyMemory::with_seed(size::mb(32), 0); + let mem = DummyMemory::new(size::mb(32)); + let mut dummy_os = DummyOs::with_seed(mem, 0); let cmp_buf = [143u8; 16]; - let write_addr = 0.into(); + let write_addr = PhysicalAddress::NULL; - mem.phys_write_raw(write_addr, &cmp_buf).unwrap(); + dummy_os.as_mut().phys_write(write_addr, &cmp_buf).unwrap(); let arch = x86::x64::ARCH; - let mut mem = CachedMemoryAccess::builder(mem) + let mut mem = CachedPhysicalMemory::builder(dummy_os.into_inner()) .validator(TimedCacheValidator::new(Duration::from_secs(100))) .page_type_mask(PageType::UNKNOWN) .arch(arch) @@ -389,14 +445,14 @@ mod tests { .unwrap(); let mut read_buf = [0u8; 16]; - mem.phys_read_raw_into(write_addr, &mut read_buf).unwrap(); + mem.phys_read_into(write_addr, &mut read_buf).unwrap(); assert_eq!(read_buf, cmp_buf); let mut cloned_mem = mem.clone(); let mut cloned_read_buf = [0u8; 16]; cloned_mem - .phys_read_raw_into(write_addr, &mut cloned_read_buf) + .phys_read_into(write_addr, &mut cloned_read_buf) .unwrap(); assert_eq!(cloned_read_buf, cmp_buf); } @@ -406,8 +462,9 @@ mod tests { /// The predetermined seed was found to be problematic when it comes to memory overlap #[test] fn big_virt_buf() { - for &seed in &[0x3ffd_235c_5194_dedf, thread_rng().gen_range(0, !0u64)] { - let mut dummy_mem = DummyMemory::with_seed(size::mb(512), seed); + for &seed in &[0x3ffd_235c_5194_dedf, thread_rng().gen_range(0..!0u64)] { + let dummy_mem = DummyMemory::new(size::mb(512)); + let mut dummy_os = DummyOs::with_seed(dummy_mem, seed); let virt_size = size::mb(18); let mut test_buf = vec![0_u64; virt_size / 8]; @@ -419,16 +476,16 @@ mod tests { let test_buf = unsafe { std::slice::from_raw_parts(test_buf.as_ptr() as *const u8, virt_size) }; - let (dtb, virt_base) = dummy_mem.alloc_dtb(virt_size, &test_buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(virt_size, test_buf); let arch = x86::x64::ARCH; println!("dtb={:x} 
virt_base={:x} seed={:x}", dtb, virt_base, seed); let translator = x86::x64::new_translator(dtb); let mut buf_nocache = vec![0_u8; test_buf.len()]; { - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, buf_nocache.as_mut_slice()) + .read_raw_into(virt_base, buf_nocache.as_mut_slice()) .unwrap(); } @@ -447,12 +504,12 @@ mod tests { PageType::PAGE_TABLE | PageType::READ_ONLY, TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); let mut buf_cache = vec![0_u8; buf_nocache.len()]; { - let mut virt_mem = VirtualDMA::new(&mut mem_cache, arch, translator); + let mut virt_mem = VirtualDma::new(mem_cache.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, buf_cache.as_mut_slice()) + .read_raw_into(virt_base, buf_cache.as_mut_slice()) .unwrap(); } @@ -470,14 +527,15 @@ mod tests { #[test] fn cache_invalidity_cached() { - let mut dummy_mem = DummyMemory::new(size::mb(64)); - let mem_ptr = &mut dummy_mem as *mut DummyMemory; + let dummy_mem = DummyMemory::new(size::mb(64)); + let mut dummy_os = DummyOs::new(dummy_mem); + let mem_ptr = dummy_os.as_mut() as *mut DummyMemory; let virt_size = size::mb(8); let mut buf_start = vec![0_u8; 64]; for (i, item) in buf_start.iter_mut().enumerate() { *item = (i % 256) as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(virt_size, &buf_start); + let (dtb, virt_base) = dummy_os.alloc_dtb(virt_size, &buf_start); let arch = x86::x64::ARCH; let translator = x86::x64::new_translator(dtb); @@ -488,32 +546,33 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); 
//Modifying the memory from other channels should leave the cached page unchanged let mut cached_buf = vec![0_u8; 64]; { - let mut virt_mem = VirtualDMA::new(&mut mem_cache, arch, translator); + let mut virt_mem = VirtualDma::new(mem_cache.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, cached_buf.as_mut_slice()) + .read_raw_into(virt_base, cached_buf.as_mut_slice()) .unwrap(); } let mut write_buf = cached_buf.clone(); write_buf[16..20].copy_from_slice(&[255, 255, 255, 255]); { - let mut virt_mem = - VirtualDMA::new(unsafe { mem_ptr.as_mut().unwrap() }, arch, translator); - virt_mem - .virt_write_raw(virt_base, write_buf.as_slice()) - .unwrap(); + let mut virt_mem = VirtualDma::new( + unsafe { mem_ptr.as_mut().unwrap() }.forward_mut(), + arch, + translator, + ); + virt_mem.write_raw(virt_base, write_buf.as_slice()).unwrap(); } let mut check_buf = vec![0_u8; 64]; { - let mut virt_mem = VirtualDMA::new(&mut mem_cache, arch, translator); + let mut virt_mem = VirtualDma::new(mem_cache.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, check_buf.as_mut_slice()) + .read_raw_into(virt_base, check_buf.as_mut_slice()) .unwrap(); } @@ -523,14 +582,15 @@ mod tests { #[test] fn cache_invalidity_non_cached() { - let mut dummy_mem = DummyMemory::new(size::mb(64)); - let mem_ptr = &mut dummy_mem as *mut DummyMemory; + let dummy_mem = DummyMemory::new(size::mb(64)); + let mut dummy_os = DummyOs::new(dummy_mem); + let mem_ptr = dummy_os.as_mut() as *mut DummyMemory; let virt_size = size::mb(8); let mut buf_start = vec![0_u8; 64]; for (i, item) in buf_start.iter_mut().enumerate() { *item = (i % 256) as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(virt_size, &buf_start); + let (dtb, virt_base) = dummy_os.alloc_dtb(virt_size, &buf_start); let arch = x86::x64::ARCH; let translator = x86::x64::new_translator(dtb); @@ -542,32 +602,33 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = 
CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); //Modifying the memory from other channels should leave the cached page unchanged let mut cached_buf = vec![0_u8; 64]; { - let mut virt_mem = VirtualDMA::new(&mut mem_cache, arch, translator); + let mut virt_mem = VirtualDma::new(mem_cache.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, cached_buf.as_mut_slice()) + .read_raw_into(virt_base, cached_buf.as_mut_slice()) .unwrap(); } let mut write_buf = cached_buf.clone(); write_buf[16..20].copy_from_slice(&[255, 255, 255, 255]); { - let mut virt_mem = - VirtualDMA::new(unsafe { mem_ptr.as_mut().unwrap() }, arch, translator); - virt_mem - .virt_write_raw(virt_base, write_buf.as_slice()) - .unwrap(); + let mut virt_mem = VirtualDma::new( + unsafe { mem_ptr.as_mut().unwrap() }.forward_mut(), + arch, + translator, + ); + virt_mem.write_raw(virt_base, write_buf.as_slice()).unwrap(); } let mut check_buf = vec![0_u8; 64]; { - let mut virt_mem = VirtualDMA::new(mem_cache, arch, translator); + let mut virt_mem = VirtualDma::new(mem_cache.forward_mut(), arch, translator); virt_mem - .virt_read_raw_into(virt_base, check_buf.as_mut_slice()) + .read_raw_into(virt_base, check_buf.as_mut_slice()) .unwrap(); } @@ -581,7 +642,8 @@ mod tests { /// caches a different page in the entry before the said copy is operation is made. 
#[test] fn cache_phys_mem_overlap() { - let mut dummy_mem = DummyMemory::new(size::mb(16)); + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); let buf_size = size::kb(8); let mut buf_start = vec![0_u8; buf_size]; @@ -589,12 +651,13 @@ mod tests { *item = ((i / 115) % 256) as u8; } - let address = Address::from(0); + let address = Address::NULL; let addr = PhysicalAddress::with_page(address, PageType::default().write(false), 0x1000); - dummy_mem - .phys_write_raw(addr, buf_start.as_slice()) + dummy_os + .as_mut() + .phys_write(addr, buf_start.as_slice()) .unwrap(); let arch = x86::x64::ARCH; @@ -606,13 +669,22 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); let mut buf_1 = vec![0_u8; buf_size]; + mem_cache + .phys_read_into(addr, buf_1.as_mut_slice()) + .unwrap(); + println!("READ CACHED {:p}", buf_1.as_ptr()); + println!("BS {:?} {:p}", &buf_start[..128], buf_start.as_ptr()); + println!("B1 {:?} {:p}", &buf_1[..128], buf_1.as_ptr()); mem_cache .phys_read_into(addr, buf_1.as_mut_slice()) .unwrap(); + println!("BS {:?} {:p}", &buf_start[..128], buf_start.as_ptr()); + println!("B1 {:?} {:p}", &buf_1[..128], buf_1.as_ptr()); + assert!( buf_start == buf_1, "buf_start != buf_1; diff: {:?}", @@ -639,7 +711,8 @@ mod tests { #[test] fn cache_phys_mem() { - let mut dummy_mem = DummyMemory::new(size::mb(16)); + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf_start = vec![0_u8; 64]; for (i, item) in buf_start.iter_mut().enumerate() { @@ -650,8 +723,9 @@ mod tests { let addr = PhysicalAddress::with_page(address, PageType::default().write(false), 0x1000); - dummy_mem - .phys_write_raw(addr, buf_start.as_slice()) + dummy_os + .as_mut() + .phys_write(addr, buf_start.as_slice()) .unwrap(); let arch = 
x86::x64::ARCH; @@ -663,7 +737,7 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); let mut buf_1 = vec![0_u8; 64]; mem_cache @@ -674,7 +748,8 @@ mod tests { } #[test] fn cache_phys_mem_diffpages() { - let mut dummy_mem = DummyMemory::new(size::mb(16)); + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf_start = vec![0_u8; 64]; for (i, item) in buf_start.iter_mut().enumerate() { @@ -687,8 +762,9 @@ mod tests { let addr2 = PhysicalAddress::with_page(address, PageType::default().write(false), 0x100); - dummy_mem - .phys_write_raw(addr1, buf_start.as_slice()) + dummy_os + .as_mut() + .phys_write(addr1, buf_start.as_slice()) .unwrap(); let cache = PageCache::with_page_size( @@ -698,7 +774,7 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); + let mut mem_cache = CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); let mut buf_1 = vec![0_u8; 64]; mem_cache @@ -724,13 +800,14 @@ mod tests { #[test] fn writeback() { - let mut dummy_mem = DummyMemory::new(size::mb(16)); + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); let virt_size = size::mb(8); let mut buf_start = vec![0_u8; 64]; for (i, item) in buf_start.iter_mut().enumerate() { *item = (i % 256) as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(virt_size, &buf_start); + let (dtb, virt_base) = dummy_os.alloc_dtb(virt_size, &buf_start); let arch = x86::x64::ARCH; let translator = x86::x64::new_translator(dtb); @@ -741,31 +818,25 @@ mod tests { TimedCacheValidator::new(Duration::from_secs(100)), ); - let mut mem_cache = CachedMemoryAccess::new(&mut dummy_mem, cache); - let mut virt_mem = VirtualDMA::new(&mut mem_cache, arch, translator); + let mem_cache = 
CachedPhysicalMemory::new(dummy_os.forward_mut(), cache); + let mut virt_mem = VirtualDma::new(mem_cache, arch, translator); let mut buf_1 = vec![0_u8; 64]; - virt_mem - .virt_read_into(virt_base, buf_1.as_mut_slice()) - .unwrap(); + virt_mem.read_into(virt_base, buf_1.as_mut_slice()).unwrap(); assert_eq!(buf_start, buf_1); buf_1[16..20].copy_from_slice(&[255, 255, 255, 255]); - virt_mem.virt_write(virt_base + 16, &buf_1[16..20]).unwrap(); + virt_mem.write(virt_base + 16_u64, &buf_1[16..20]).unwrap(); let mut buf_2 = vec![0_u8; 64]; - virt_mem - .virt_read_into(virt_base, buf_2.as_mut_slice()) - .unwrap(); + virt_mem.read_into(virt_base, buf_2.as_mut_slice()).unwrap(); assert_eq!(buf_1, buf_2); assert_ne!(buf_2, buf_start); let mut buf_3 = vec![0_u8; 64]; - virt_mem - .virt_read_into(virt_base, buf_3.as_mut_slice()) - .unwrap(); + virt_mem.read_into(virt_base, buf_3.as_mut_slice()).unwrap(); assert_eq!(buf_2, buf_3); } } diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/delay.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/delay.rs new file mode 100644 index 0000000..b584efc --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/delay.rs @@ -0,0 +1,223 @@ +use ::std::{thread, time::Duration}; + +use crate::error::Result; +use crate::mem::{ + PhysicalMemory, PhysicalMemoryMapping, PhysicalMemoryMetadata, PhysicalReadMemOps, + PhysicalWriteMemOps, +}; + +/// The delay middleware introduces delay and jitter into physical reads which allows +/// users to simulate different connectors and setups. +/// +/// Since this middleware implements [`PhysicalMemory`] it can be used as a replacement +/// in all structs and functions that require the [`PhysicalMemory`] trait. 
+pub struct DelayedPhysicalMemory { + mem: T, + delay: Duration, +} + +impl Clone for DelayedPhysicalMemory +where + T: Clone, +{ + fn clone(&self) -> Self { + Self { + mem: self.mem.clone(), + delay: self.delay, + } + } +} + +impl DelayedPhysicalMemory { + /// Constructs a new middleware with the given delay. + /// + /// This function is used when manually constructing a middleware inside of the memflow crate itself. + /// + /// For general usage it is advised to just use the [builder](struct.DelayedPhysicalMemoryBuilder.html) + /// to construct the delay. + pub fn new(mem: T, delay: Duration) -> Self { + Self { mem, delay } + } + + /// Consumes self and returns the containing memory object. + /// + /// This function can be useful in case the ownership over the memory object has been given to the cache + /// when it was being constructed. + /// It will destroy the `self` and return back the ownership of the underlying memory object. + /// + /// # Examples + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::architecture::x86::x64; + /// use memflow::mem::{PhysicalMemory, DelayedPhysicalMemory, MemoryView}; + /// + /// fn build(mem: T) -> T { + /// let mut middleware = DelayedPhysicalMemory::builder(mem) + /// .build() + /// .unwrap(); + /// + /// // use the middleware... + /// let value: u64 = middleware.phys_view().read(0.into()).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// + /// // retrieve ownership of mem and return it back + /// middleware.into_inner() + /// } + /// # use memflow::dummy::DummyMemory; + /// # use memflow::types::size; + /// # let mut mem = DummyMemory::new(size::mb(4)); + /// # mem.phys_write(0.into(), &MAGIC_VALUE).unwrap(); + /// # build(mem); + /// ``` + pub fn into_inner(self) -> T { + self.mem + } +} + +impl DelayedPhysicalMemory { + /// Returns a new builder for the delay middleware with default settings. 
+ pub fn builder(mem: T) -> DelayedPhysicalMemoryBuilder { + DelayedPhysicalMemoryBuilder::new(mem) + } +} + +// forward PhysicalMemory trait fncs +impl PhysicalMemory for DelayedPhysicalMemory { + #[inline] + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()> { + thread::sleep(self.delay); + self.mem.phys_read_raw_iter(data) + } + + #[inline] + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()> { + thread::sleep(self.delay); + self.mem.phys_write_raw_iter(data) + } + + #[inline] + fn metadata(&self) -> PhysicalMemoryMetadata { + self.mem.metadata() + } + + #[inline] + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.mem.set_mem_map(mem_map) + } +} + +/// The builder interface for constructing a `DelayedPhysicalMemory` object. +pub struct DelayedPhysicalMemoryBuilder { + mem: T, + delay: Duration, +} + +impl DelayedPhysicalMemoryBuilder { + /// Creates a new `DelayedPhysicalMemory` builder. + /// The memory object is mandatory as the DelayedPhysicalMemory struct wraps around it. + /// + /// Without further adjustments this function creates a middleware with a delay of 10 milliseconds + /// for each read and write. + /// + /// It is also possible to either let the `DelayedPhysicalMemory` object own or just borrow the underlying memory object. 
+ /// + /// # Examples + /// Moves ownership of a mem object and retrieves it back: + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::architecture::x86::x64; + /// use memflow::mem::{PhysicalMemory, DelayedPhysicalMemory, MemoryView}; + /// + /// fn build(mem: T) { + /// let mut middleware = DelayedPhysicalMemory::builder(mem) + /// .build() + /// .unwrap(); + /// + /// middleware.phys_write(0.into(), &MAGIC_VALUE); + /// + /// let mut mem = middleware.into_inner(); + /// + /// let value: u64 = mem.phys_view().read(0.into()).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// } + /// # use memflow::dummy::DummyMemory; + /// # use memflow::types::size; + /// # let mut mem = DummyMemory::new(size::mb(4)); + /// # mem.phys_write(0.into(), &0xffaaffaau64).unwrap(); + /// # build(mem); + /// ``` + /// + /// Borrowing a mem object: + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::architecture::x86::x64; + /// use memflow::mem::{PhysicalMemory, DelayedPhysicalMemory, MemoryView}; + /// use memflow::cglue::{Fwd, ForwardMut}; + /// + /// fn build(mem: Fwd<&mut T>) + /// -> impl PhysicalMemory + '_ { + /// DelayedPhysicalMemory::builder(mem) + /// .build() + /// .unwrap() + /// } + /// + /// # use memflow::dummy::DummyMemory; + /// # use memflow::types::size; + /// # let mut mem = DummyMemory::new(size::mb(4)); + /// # mem.phys_write(0.into(), &MAGIC_VALUE).unwrap(); + /// let mut middleware = build(mem.forward_mut()); + /// + /// let value: u64 = middleware.phys_view().read(0.into()).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// + /// middleware.phys_write(0.into(), &0u64).unwrap(); + /// + /// // We drop the cache and are able to use mem again + /// std::mem::drop(middleware); + /// + /// let value: u64 = mem.phys_view().read(0.into()).unwrap(); + /// assert_ne!(value, MAGIC_VALUE); + /// ``` + pub fn new(mem: T) -> Self { + Self { + mem, + delay: Duration::from_millis(10), + } + } + + /// 
Changes the delay of the middleware. + /// + /// # Examples + /// + /// ``` + /// use memflow::types::size; + /// use memflow::mem::{PhysicalMemory, DelayedPhysicalMemory}; + /// use std::time::Duration; + /// + /// fn build(mem: T) { + /// let middleware = DelayedPhysicalMemory::builder(mem) + /// .delay(Duration::from_millis(10)) + /// .build() + /// .unwrap(); + /// } + /// # use memflow::dummy::DummyMemory; + /// # let mut mem = DummyMemory::new(size::mb(4)); + /// # build(mem); + /// ``` + pub fn delay(mut self, delay: Duration) -> Self { + self.delay = delay; + self + } + + /// Builds the `DelayedPhysicalMemory` object or returns an error. + pub fn build(self) -> Result> { + Ok(DelayedPhysicalMemory::new(self.mem, self.delay)) + } +} + +#[cfg(feature = "plugins")] +::cglue::cglue_impl_group!( + DelayedPhysicalMemory, + crate::plugins::ConnectorInstance, + {} +); diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/metrics.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/metrics.rs new file mode 100644 index 0000000..ca06122 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/metrics.rs @@ -0,0 +1,336 @@ +use ::log::info; +use ::std::{collections::VecDeque, time::Instant}; + +use crate::mem::{ + PhysicalMemory, PhysicalMemoryMapping, PhysicalMemoryMetadata, PhysicalReadMemOps, + PhysicalWriteMemOps, +}; +use crate::{error::Result, mem::MemOps}; + +/// The metrics middleware collects metrics data (latency and number of bytes) for all read and write operations. +/// Additionally metrics are outputted via `::log::info` in regular intervals. +/// +/// Since this middleware implements [`PhysicalMemory`] it can be used as a replacement +/// in all structs and functions that require the [`PhysicalMemory`] trait. 
+pub struct PhysicalMemoryMetrics { + mem: T, + reads: MemOpsHistory, + last_read_info: Instant, + writes: MemOpsHistory, + last_write_info: Instant, +} + +impl Clone for PhysicalMemoryMetrics +where + T: Clone, +{ + fn clone(&self) -> Self { + Self { + mem: self.mem.clone(), + reads: self.reads.clone(), + last_read_info: Instant::now(), + writes: self.writes.clone(), + last_write_info: Instant::now(), + } + } +} + +impl PhysicalMemoryMetrics { + /// Constructs a new middleware. + pub fn new(mem: T) -> Self { + // TODO: configurable number of samples? + Self { + mem, + reads: MemOpsHistory::new(0..100, 1.0), + last_read_info: Instant::now(), + writes: MemOpsHistory::new(0..100, 1.0), + last_write_info: Instant::now(), + } + } + + /// Consumes self and returns the containing memory object. + /// + /// This function can be useful in case the ownership over the memory object has been given to the cache + /// when it was being constructed. + /// It will destroy the `self` and return back the ownership of the underlying memory object. + /// + /// # Examples + /// ``` + /// # const MAGIC_VALUE: u64 = 0x23bd_318f_f3a3_5821; + /// use memflow::architecture::x86::x64; + /// use memflow::mem::{PhysicalMemory, PhysicalMemoryMetrics, MemoryView}; + /// + /// fn build(mem: T) -> T { + /// let mut middleware = PhysicalMemoryMetrics::new(mem); + /// + /// // use the middleware... 
+ /// let value: u64 = middleware.phys_view().read(0.into()).unwrap(); + /// assert_eq!(value, MAGIC_VALUE); + /// + /// // retrieve ownership of mem and return it back + /// middleware.into_inner() + /// } + /// # use memflow::dummy::DummyMemory; + /// # use memflow::types::size; + /// # let mut mem = DummyMemory::new(size::mb(4)); + /// # mem.phys_write(0.into(), &MAGIC_VALUE).unwrap(); + /// # build(mem); + /// ``` + pub fn into_inner(self) -> T { + self.mem + } +} + +// forward PhysicalMemory trait fncs +impl PhysicalMemory for PhysicalMemoryMetrics { + #[inline] + fn phys_read_raw_iter( + &mut self, + MemOps { inp, out_fail, out }: PhysicalReadMemOps, + ) -> Result<()> { + let mut number_of_bytes = 0; + let iter = inp.inspect(|e| number_of_bytes += e.2.len()); + + let start_time = Instant::now(); + + let mem = &mut self.mem; + let result = MemOps::with_raw(iter, out, out_fail, |data| mem.phys_read_raw_iter(data)); + + self.reads + .add(start_time.elapsed().as_secs_f64(), number_of_bytes); + + //if self.reads.total_count() % 10000 == 0 { + if self.last_read_info.elapsed().as_secs_f64() >= 1f64 { + info!( + "Read Metrics: reads_per_second={} average_latency={:.4}ms; average_bytes={}; bytes_per_second={}", + self.reads.len(), + self.reads.average_latency().unwrap_or_default() * 1000f64, + self.reads.average_bytes().unwrap_or_default(), + self.reads.bandwidth().unwrap_or_default(), + ); + self.last_read_info = Instant::now(); + } + + result + } + + #[inline] + fn phys_write_raw_iter( + &mut self, + MemOps { inp, out_fail, out }: PhysicalWriteMemOps, + ) -> Result<()> { + let mut number_of_bytes = 0; + let iter = inp.inspect(|e| number_of_bytes += e.2.len()); + + let start_time = Instant::now(); + + let mem = &mut self.mem; + let result = MemOps::with_raw(iter, out, out_fail, |data| mem.phys_write_raw_iter(data)); + + self.writes + .add(start_time.elapsed().as_secs_f64(), number_of_bytes); + + //if self.writes.total_count() % 10000 == 0 { + if 
self.last_write_info.elapsed().as_secs_f64() >= 1f64 { + info!( + "Write Metrics: writes_per_second={} average_latency={:.4}ms; average_bytes={}; bytes_per_second={}", + self.writes.len(), + self.writes.average_latency().unwrap_or_default() * 1000f64, + self.writes.average_bytes().unwrap_or_default(), + self.writes.bandwidth().unwrap_or_default(), + ); + self.last_write_info = Instant::now(); + } + + result + } + + #[inline] + fn metadata(&self) -> PhysicalMemoryMetadata { + self.mem.metadata() + } + + #[inline] + fn set_mem_map(&mut self, mem_map: &[PhysicalMemoryMapping]) { + self.mem.set_mem_map(mem_map) + } +} + +#[cfg(feature = "plugins")] +::cglue::cglue_impl_group!( + PhysicalMemoryMetrics, + crate::plugins::ConnectorInstance, + {} +); + +/// This struct tracks latency and length of recent read and write operations. +/// +/// It has a minimum and maximum length, as well as a maximum storage time. +/// * The minimum length is to ensure you have enough data for an estimate. +/// * The maximum length is to make sure the history doesn't take up too much space. +/// * The maximum age is to make sure the estimate isn't outdated. +/// +/// Time difference between values can be zero, but never negative. +/// +/// This implementation is derived from (egui)[https://github.com/emilk/egui/blob/1c8cf9e3d59d8aee4c073b9e17695ee85c40bdbf/crates/emath/src/history.rs]. +#[derive(Clone, Debug)] +struct MemOpsHistory { + start_time: Instant, + + /// In elements, i.e. of `values.len()`. + /// The length is initially zero, but once past `min_len` will not shrink below it. + min_len: usize, + + /// In elements, i.e. of `values.len()`. + max_len: usize, + + /// In seconds. + max_age: f32, + + /// Total number of elements seen ever + total_count: u64, + + /// (time, value) pairs, oldest front, newest back. + /// Time difference between values can be zero, but never negative. 
+ values: VecDeque<(f64, MemOpsHistoryEntry)>, +} + +#[derive(Clone, Copy, Debug)] +struct MemOpsHistoryEntry { + pub latency: f64, // secs + pub bytes: usize, // bytes +} + +#[allow(unused)] +impl MemOpsHistory { + pub fn new(length_range: std::ops::Range, max_age: f32) -> Self { + Self { + start_time: Instant::now(), + min_len: length_range.start, + max_len: length_range.end, + max_age, + total_count: 0, + values: Default::default(), + } + } + + #[inline] + pub fn max_len(&self) -> usize { + self.max_len + } + + #[inline] + pub fn max_age(&self) -> f32 { + self.max_age + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } + + /// Current number of values kept in history + #[inline] + pub fn len(&self) -> usize { + self.values.len() + } + + /// Total number of values seen. + /// Includes those that have been discarded due to `max_len` or `max_age`. + #[inline] + pub fn total_count(&self) -> u64 { + self.total_count + } + + #[inline] + pub fn clear(&mut self) { + self.values.clear(); + } + + /// Values must be added with a monotonically increasing time, or at least not decreasing. + pub fn add(&mut self, latency: f64, bytes: usize) { + let now = self.start_time.elapsed().as_secs_f64(); + if let Some((last_time, _)) = self.values.back() { + assert!(now >= *last_time, "Time shouldn't move backwards"); + } + self.total_count += 1; + self.values + .push_back((now, MemOpsHistoryEntry { latency, bytes })); + self.flush(); + } + + /// Mean time difference between values in this [`History`]. + pub fn mean_time_interval(&self) -> Option { + if let (Some(first), Some(last)) = (self.values.front(), self.values.back()) { + let n = self.len(); + if n >= 2 { + Some((last.0 - first.0) / ((n - 1) as f64)) + } else { + None + } + } else { + None + } + } + + // Mean number of events per second. + pub fn rate(&self) -> Option { + self.mean_time_interval().map(|time| 1.0 / time) + } + + /// Remove samples that are too old. 
+ pub fn flush(&mut self) { + let now = self.start_time.elapsed().as_secs_f64(); + while self.values.len() > self.max_len { + self.values.pop_front(); + } + while self.values.len() > self.min_len { + if let Some((front_time, _)) = self.values.front() { + if *front_time < now - (self.max_age as f64) { + self.values.pop_front(); + } else { + break; + } + } else { + break; + } + } + } + + /// Returns the sum of all latencys + #[inline] + pub fn sum_latency(&self) -> f64 { + self.values.iter().map(|(_, value)| value.latency).sum() + } + + /// Returns the average latency + pub fn average_latency(&self) -> Option { + let num = self.len(); + if num > 0 { + Some(self.sum_latency() / (num as f64)) + } else { + None + } + } + + /// Returns the sum of bytes transmitted + #[inline] + pub fn sum_bytes(&self) -> usize { + self.values.iter().map(|(_, value)| value.bytes).sum() + } + + /// Returns the average number of bytes transmitted + pub fn average_bytes(&self) -> Option { + let num = self.len(); + if num > 0 { + Some((self.sum_bytes() as f64 / (num as f64)) as usize) + } else { + None + } + } + + /// Returns the number of bytes per second + pub fn bandwidth(&self) -> Option { + Some((self.average_bytes()? as f64 * self.rate()?) 
as usize) + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/mod.rs new file mode 100644 index 0000000..96ad0c2 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/middleware/mod.rs @@ -0,0 +1,17 @@ +pub mod cache; + +#[cfg(feature = "std")] +pub mod delay; +#[cfg(feature = "std")] +pub mod metrics; + +#[doc(hidden)] +pub use cache::*; + +#[cfg(feature = "std")] +#[doc(hidden)] +pub use delay::*; + +#[cfg(feature = "std")] +#[doc(hidden)] +pub use metrics::*; diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/mod.rs new file mode 100644 index 0000000..15df164 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/phys_mem/mod.rs @@ -0,0 +1,337 @@ +use crate::cglue::*; +use crate::dataview::{Pod, PodMethods}; +use crate::error::Result; +use crate::types::{umem, Address, PhysicalAddress}; + +use super::mem_data::*; +use super::PhysicalMemoryMapping; + +use std::prelude::v1::*; + +use crate::mem::memory_view::*; + +pub mod middleware; + +pub use middleware::*; + +// TODO: +// - check endianess here and return an error +// - better would be to convert endianess with word alignment from addr + +/// The [`PhysicalMemory`] trait is implemented by memory backends +/// and provides a generic way to read and write from/to physical memory. +/// +/// All addresses are of the type [`PhysicalAddress`](../types/physical_address/index.html) +/// and can contain additional information about the page the address resides in. +/// This information is usually only needed when implementing caches. +/// +/// There are only 2 methods which are required to be implemented by the provider of this trait. 
+/// +/// # Examples +/// +/// Implementing [`PhysicalMemory`] for a memory backend: +/// ``` +/// use std::vec::Vec; +/// use std::convert::TryInto; +/// +/// use memflow::mem::{ +/// MemoryMap, +/// PhysicalMemoryMapping, +/// phys_mem::{ +/// PhysicalMemory, +/// PhysicalMemoryMetadata, +/// }, +/// mem_data::{ +/// MemOps, +/// PhysicalReadMemOps, +/// PhysicalWriteMemOps, +/// opt_call, +/// } +/// }; +/// +/// use memflow::cglue::{CIterator, CTup2, CTup3}; +/// +/// use memflow::types::{PhysicalAddress, Address, umem}; +/// use memflow::error::Result; +/// +/// pub struct MemoryBackend { +/// mem: Box<[u8]>, +/// } +/// +/// impl PhysicalMemory for MemoryBackend { +/// fn phys_read_raw_iter( +/// &mut self, +/// MemOps { +/// inp, +/// mut out, +/// .. +/// }: PhysicalReadMemOps, +/// ) -> Result<()> { +/// inp +/// .for_each(|CTup3(addr, meta_addr, mut data)| { +/// let addr: usize = addr.to_umem().try_into().unwrap(); +/// let len = data.len(); +/// data.copy_from_slice(&self.mem[addr..(addr + len)]); +/// opt_call(out.as_deref_mut(), CTup2(meta_addr, data)); +/// }); +/// Ok(()) +/// } +/// +/// fn phys_write_raw_iter( +/// &mut self, +/// MemOps { +/// inp, +/// mut out, +/// .. 
+/// }: PhysicalWriteMemOps, +/// ) -> Result<()> { +/// inp +/// .for_each(|CTup3(addr, meta_addr, data)| { +/// let addr: usize = addr.to_umem().try_into().unwrap(); +/// let len = data.len(); +/// self.mem[addr..(addr + len)].copy_from_slice(&data); +/// opt_call(out.as_deref_mut(), CTup2(meta_addr, data)); +/// }); +/// Ok(()) +/// } +/// +/// fn metadata(&self) -> PhysicalMemoryMetadata { +/// PhysicalMemoryMetadata { +/// max_address: (self.mem.len() - 1).into(), +/// real_size: self.mem.len() as umem, +/// readonly: false, +/// ideal_batch_size: u32::MAX +/// } +/// } +/// } +/// ``` +/// +/// Reading from [`PhysicalMemory`]: +/// ``` +/// use memflow::types::Address; +/// use memflow::mem::PhysicalMemory; +/// +/// fn read(mem: &mut T) { +/// let mut addr = 0u64; +/// mem.phys_read_into(Address::from(0x1000).into(), &mut addr).unwrap(); +/// println!("addr: {:x}", addr); +/// } +/// +/// # use memflow::dummy::DummyMemory; +/// # use memflow::types::size; +/// # read(&mut DummyMemory::new(size::mb(4))); +/// ``` +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +#[cglue_forward] +pub trait PhysicalMemory: Send { + fn phys_read_raw_iter(&mut self, data: PhysicalReadMemOps) -> Result<()>; + fn phys_write_raw_iter(&mut self, data: PhysicalWriteMemOps) -> Result<()>; + + /// Retrieve metadata about the physical memory + /// + /// This function will return metadata about the underlying physical memory object, currently + /// including address space size and read-only status. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::types::{size, mem}; + /// use memflow::mem::PhysicalMemory; + /// # let mem = memflow::dummy::DummyMemory::new(size::mb(16)); + /// + /// let metadata = mem.metadata(); + /// + /// assert_eq!(metadata.max_address.to_umem(), mem::mb(16) - 1); + /// assert_eq!(metadata.real_size, mem::mb(16)); + /// assert_eq!(metadata.readonly, false); + /// ``` + fn metadata(&self) -> PhysicalMemoryMetadata; + + /// Sets the memory mapping for the physical memory + /// + /// In case a connector cannot acquire memory mappings on it's own this function + /// allows the OS plugin to set the memory mapping at a later stage of initialization. + /// + /// The only reason this is needed for some connectors is to avoid catastrophic failures upon reading invalid address. + /// + /// By default this is a no-op. + #[inline] + fn set_mem_map(&mut self, _mem_map: &[PhysicalMemoryMapping]) {} + + #[skip_func] + fn phys_read_into(&mut self, addr: PhysicalAddress, out: &mut T) -> Result<()> + where + Self: Sized, + { + MemOps::with( + std::iter::once((addr, CSliceMut::from(out.as_bytes_mut()))), + None, + Some( + &mut (&mut |CTup2(_, mut d): ReadData| { + d.iter_mut().for_each(|b| *b = 0); + true + }) + .into(), + ), + |data| self.phys_read_raw_iter(data), + ) + } + + #[skip_func] + fn phys_write(&mut self, addr: PhysicalAddress, data: &T) -> Result<()> + where + Self: Sized, + { + MemOps::with( + std::iter::once((addr, CSliceRef::from(data.as_bytes()))), + None, + None, + |data| self.phys_write_raw_iter(data), + ) + } + + // deprecated = Remove this function (superseeded by into_mem_view) + #[vtbl_only('static, wrap_with_obj(MemoryView))] + fn into_phys_view(self) -> PhysicalMemoryView + where + Self: Sized, + { + PhysicalMemoryView { + mem: self, + zero_fill_gaps: false, + } + } + + // deprecated = Remove this function (superseeded by mem_view) + #[vtbl_only('_, wrap_with_obj(MemoryView))] + fn phys_view(&mut self) -> PhysicalMemoryView> 
+ where + Self: Sized, + { + self.forward_mut().into_mem_view() + } + + // deprecated = Expose this via cglue + #[skip_func] + //#[vtbl_only('static, wrap_with_obj(MemoryView))] + fn into_mem_view(self) -> PhysicalMemoryView + where + Self: Sized, + { + PhysicalMemoryView { + mem: self, + zero_fill_gaps: false, + } + } + + // deprecated = Expose this via cglue + #[skip_func] + //#[vtbl_only('_, wrap_with_obj(MemoryView))] + fn mem_view(&mut self) -> PhysicalMemoryView> + where + Self: Sized, + { + self.forward_mut().into_mem_view() + } +} + +#[repr(C)] +#[derive(Clone)] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct PhysicalMemoryView { + mem: T, + zero_fill_gaps: bool, +} + +impl PhysicalMemoryView { + pub fn zero_fill_gaps(mut self) -> Self { + self.zero_fill_gaps = true; + self + } +} + +impl MemoryView for PhysicalMemoryView { + fn read_raw_iter<'a>( + &mut self, + MemOps { inp, out, out_fail }: ReadRawMemOps<'a, '_, '_, '_>, + ) -> Result<()> { + let inp = &mut inp.map(|CTup3(addr, meta_addr, data)| CTup3(addr.into(), meta_addr, data)); + let inp = inp.into(); + + #[allow(clippy::unnecessary_unwrap)] + if self.zero_fill_gaps && out.is_some() && out_fail.is_some() { + let out = std::cell::RefCell::new(out.unwrap()); + + let ma = self.mem.metadata().max_address; + + let out1 = &mut |data| out.borrow_mut().call(data); + let out = &mut |data| out.borrow_mut().call(data); + let out = &mut out.into(); + let out = Some(out); + + let out_fail = out_fail.unwrap(); + + let out_fail = &mut |mut data: ReadData<'a>| { + if data.0 < ma { + data.1.iter_mut().for_each(|b| *b = 0); + out1(data) + } else { + out_fail.call(data) + } + }; + + let out_fail = &mut out_fail.into(); + let out_fail = Some(out_fail); + + let data = MemOps { inp, out, out_fail }; + self.mem.phys_read_raw_iter(data) + } else { + let data = MemOps { inp, out, out_fail }; + self.mem.phys_read_raw_iter(data) + } + } + + fn write_raw_iter(&mut self, MemOps { inp, out, 
out_fail }: WriteRawMemOps) -> Result<()> { + let inp = &mut inp.map(|CTup3(addr, meta_addr, data)| CTup3(addr.into(), meta_addr, data)); + let inp = inp.into(); + + let data = MemOps { inp, out, out_fail }; + + self.mem.phys_write_raw_iter(data) + } + + fn metadata(&self) -> MemoryViewMetadata { + let PhysicalMemoryMetadata { + max_address, + real_size, + readonly, + .. + } = self.mem.metadata(); + + MemoryViewMetadata { + max_address, + real_size, + readonly, + #[cfg(target_pointer_width = "64")] + arch_bits: 64, + #[cfg(target_pointer_width = "32")] + arch_bits: 32, + #[cfg(target_endian = "little")] + little_endian: true, + #[cfg(target_endian = "big")] + little_endian: false, + } + } +} + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct PhysicalMemoryMetadata { + pub max_address: Address, + pub real_size: umem, + pub readonly: bool, + pub ideal_batch_size: u32, +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/phys_mem_batcher.rs b/apex_dma/memflow_lib/memflow/src/mem/phys_mem_batcher.rs deleted file mode 100644 index 128a710..0000000 --- a/apex_dma/memflow_lib/memflow/src/mem/phys_mem_batcher.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::prelude::v1::*; - -use crate::error::Result; -use crate::mem::phys_mem::{ - PhysicalMemory, PhysicalReadData, PhysicalReadIterator, PhysicalWriteData, - PhysicalWriteIterator, -}; -use crate::types::PhysicalAddress; - -use dataview::Pod; - -pub struct PhysicalMemoryBatcher<'a, T: PhysicalMemory> { - pmem: &'a mut T, - read_list: Vec>, - write_list: Vec>, -} - -impl<'a, T: PhysicalMemory> PhysicalMemoryBatcher<'a, T> { - pub fn new(pmem: &'a mut T) -> Self { - Self { - pmem, - read_list: vec![], - write_list: vec![], - } - } - - pub fn read_prealloc(&mut self, capacity: usize) -> &mut Self { - self.read_list.reserve(capacity); - self - } - - pub fn commit_rw(&mut self) 
-> Result<()> { - if !self.read_list.is_empty() { - self.pmem.phys_read_raw_list(&mut self.read_list)?; - self.read_list.clear(); - } - - if !self.write_list.is_empty() { - self.pmem.phys_write_raw_list(&self.write_list)?; - self.write_list.clear(); - } - - Ok(()) - } - - #[inline] - pub fn read_raw_iter>(&mut self, iter: VI) -> &mut Self { - self.read_list.extend(iter); - self - } - - #[inline] - pub fn write_raw_iter>(&mut self, iter: VI) -> &mut Self { - self.write_list.extend(iter); - self - } - - // read helpers - #[inline] - pub fn read_raw_into<'b: 'a>(&mut self, addr: PhysicalAddress, out: &'b mut [u8]) -> &mut Self { - self.read_raw_iter(Some(PhysicalReadData(addr, out)).into_iter()) - } - - #[inline] - pub fn read_into<'b: 'a, F: Pod + ?Sized>( - &mut self, - addr: PhysicalAddress, - out: &'b mut F, - ) -> &mut Self { - self.read_raw_into(addr, out.as_bytes_mut()) - } - - // write helpers - #[inline] - pub fn write_raw_into<'b: 'a>(&mut self, addr: PhysicalAddress, out: &'b [u8]) -> &mut Self { - self.write_raw_iter(Some(PhysicalWriteData(addr, out)).into_iter()) - } - - #[inline] - pub fn write_into<'b: 'a, F: Pod + ?Sized>( - &mut self, - addr: PhysicalAddress, - out: &'b F, - ) -> &mut Self { - self.write_raw_into(addr, out.as_bytes()) - } -} - -impl<'a, T: PhysicalMemory> Drop for PhysicalMemoryBatcher<'a, T> { - fn drop(&mut self) { - let _ = self.commit_rw(); - } -} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_mem.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_mem.rs deleted file mode 100644 index d89b9f3..0000000 --- a/apex_dma/memflow_lib/memflow/src/mem/virt_mem.rs +++ /dev/null @@ -1,262 +0,0 @@ -use std::prelude::v1::*; - -pub mod virtual_dma; -pub use virtual_dma::VirtualDMA; - -use super::VirtualMemoryBatcher; -use crate::architecture::ArchitectureObj; -use crate::error::{Error, PartialError, PartialResult, PartialResultExt, Result}; -use crate::types::{Address, Page, PhysicalAddress, Pointer32, Pointer64}; - -use 
std::mem::MaybeUninit; - -use dataview::Pod; - -/// The `VirtualMemory` trait implements access to virtual memory for a specific process -/// and provides a generic way to read and write from/to that processes virtual memory. -/// -/// The CPU accesses virtual memory by setting the CR3 register to the appropiate Directory Table Base (DTB) -/// for that process. The ntoskrnl.exe Kernel Process has it's own DTB. -/// Using the DTB it is possible to resolve the physical memory location of a virtual address page. -/// After the address has been resolved the physical memory page can then be read or written to. -/// -/// There are 3 methods which are required to be implemented by the provider of this trait. -/// -/// # Examples -/// -/// Reading from `VirtualMemory`: -/// ``` -/// use memflow::types::Address; -/// use memflow::mem::VirtualMemory; -/// -/// fn read(virt_mem: &mut T, read_addr: Address) { -/// let mut addr = 0u64; -/// virt_mem.virt_read_into(read_addr, &mut addr).unwrap(); -/// println!("addr: {:x}", addr); -/// # assert_eq!(addr, 0x00ff_00ff_00ff_00ff); -/// } -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # let (mut mem, virt_base) = DummyMemory::new_virt(size::mb(4), size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); -/// # read(&mut mem, virt_base); -/// ``` -pub trait VirtualMemory -where - Self: Send, -{ - fn virt_read_raw_list(&mut self, data: &mut [VirtualReadData]) -> PartialResult<()>; - - fn virt_write_raw_list(&mut self, data: &[VirtualWriteData]) -> PartialResult<()>; - - fn virt_page_info(&mut self, addr: Address) -> Result; - - fn virt_translation_map_range( - &mut self, - start: Address, - end: Address, - ) -> Vec<(Address, usize, PhysicalAddress)>; - - fn virt_page_map_range( - &mut self, - gap_size: usize, - start: Address, - end: Address, - ) -> Vec<(Address, usize)>; - - // read helpers - fn virt_read_raw_into(&mut self, addr: Address, out: &mut [u8]) -> PartialResult<()> { - 
self.virt_read_raw_list(&mut [VirtualReadData(addr, out)]) - } - - fn virt_read_into(&mut self, addr: Address, out: &mut T) -> PartialResult<()> - where - Self: Sized, - { - self.virt_read_raw_into(addr, out.as_bytes_mut()) - } - - fn virt_read_raw(&mut self, addr: Address, len: usize) -> PartialResult> { - let mut buf = vec![0u8; len]; - self.virt_read_raw_into(addr, &mut *buf).map_data(|_| buf) - } - - /// # Safety - /// - /// this function will overwrite the contents of 'obj' so we can just allocate an unitialized memory section. - /// this function should only be used with [repr(C)] structs. - #[allow(clippy::uninit_assumed_init)] - fn virt_read(&mut self, addr: Address) -> PartialResult - where - Self: Sized, - { - let mut obj: T = unsafe { MaybeUninit::uninit().assume_init() }; - self.virt_read_into(addr, &mut obj).map_data(|_| obj) - } - - // write helpers - fn virt_write_raw(&mut self, addr: Address, data: &[u8]) -> PartialResult<()> { - self.virt_write_raw_list(&[VirtualWriteData(addr, data)]) - } - - fn virt_write(&mut self, addr: Address, data: &T) -> PartialResult<()> - where - Self: Sized, - { - self.virt_write_raw(addr, data.as_bytes()) - } - - // page map helpers - fn virt_translation_map(&mut self) -> Vec<(Address, usize, PhysicalAddress)> { - self.virt_translation_map_range(Address::null(), Address::invalid()) - } - - fn virt_page_map(&mut self, gap_size: usize) -> Vec<(Address, usize)> { - self.virt_page_map_range(gap_size, Address::null(), Address::invalid()) - } - - // specific read helpers - fn virt_read_addr32(&mut self, addr: Address) -> PartialResult
- where - Self: Sized, - { - self.virt_read::(addr).map_data(|d| d.into()) - } - - fn virt_read_addr64(&mut self, addr: Address) -> PartialResult
- where - Self: Sized, - { - self.virt_read::(addr).map_data(|d| d.into()) - } - - fn virt_read_addr_arch( - &mut self, - arch: ArchitectureObj, - addr: Address, - ) -> PartialResult
- where - Self: Sized, - { - match arch.bits() { - 64 => self.virt_read_addr64(addr), - 32 => self.virt_read_addr32(addr), - _ => Err(PartialError::Error(Error::InvalidArchitecture)), - } - } - - // read pointer wrappers - fn virt_read_ptr32_into( - &mut self, - ptr: Pointer32, - out: &mut U, - ) -> PartialResult<()> - where - Self: Sized, - { - self.virt_read_into(ptr.address.into(), out) - } - - fn virt_read_ptr32(&mut self, ptr: Pointer32) -> PartialResult - where - Self: Sized, - { - self.virt_read(ptr.address.into()) - } - - fn virt_read_ptr64_into( - &mut self, - ptr: Pointer64, - out: &mut U, - ) -> PartialResult<()> - where - Self: Sized, - { - self.virt_read_into(ptr.address.into(), out) - } - - fn virt_read_ptr64(&mut self, ptr: Pointer64) -> PartialResult - where - Self: Sized, - { - self.virt_read(ptr.address.into()) - } - - // TODO: read into slice? - // TODO: if len is shorter than string -> dynamically double length up to an upper bound - fn virt_read_cstr(&mut self, addr: Address, len: usize) -> PartialResult { - let mut buf = vec![0; len]; - self.virt_read_raw_into(addr, &mut buf).data_part()?; - if let Some((n, _)) = buf.iter().enumerate().find(|(_, c)| **c == 0_u8) { - buf.truncate(n); - } - Ok(String::from_utf8_lossy(&buf).to_string()) - } - - fn virt_batcher(&mut self) -> VirtualMemoryBatcher - where - Self: Sized, - { - VirtualMemoryBatcher::new(self) - } -} - -// forward impls -impl + Send> VirtualMemory for P { - #[inline] - fn virt_read_raw_list(&mut self, data: &mut [VirtualReadData]) -> PartialResult<()> { - (**self).virt_read_raw_list(data) - } - - #[inline] - fn virt_write_raw_list(&mut self, data: &[VirtualWriteData]) -> PartialResult<()> { - (**self).virt_write_raw_list(data) - } - - #[inline] - fn virt_page_info(&mut self, addr: Address) -> Result { - (**self).virt_page_info(addr) - } - - #[inline] - fn virt_translation_map_range( - &mut self, - start: Address, - end: Address, - ) -> Vec<(Address, usize, PhysicalAddress)> { - 
(**self).virt_translation_map_range(start, end) - } - - #[inline] - fn virt_page_map_range( - &mut self, - gap_size: usize, - start: Address, - end: Address, - ) -> Vec<(Address, usize)> { - (**self).virt_page_map_range(gap_size, start, end) - } -} - -// iterator helpers -#[repr(C)] -pub struct VirtualReadData<'a>(pub Address, pub &'a mut [u8]); -pub trait VirtualReadIterator<'a>: Iterator> + 'a {} -impl<'a, T: Iterator> + 'a> VirtualReadIterator<'a> for T {} - -impl<'a> From> for (Address, &'a mut [u8]) { - fn from(VirtualReadData(a, b): VirtualReadData<'a>) -> Self { - (a, b) - } -} - -#[repr(C)] -#[derive(Clone, Copy)] -pub struct VirtualWriteData<'a>(pub Address, pub &'a [u8]); -pub trait VirtualWriteIterator<'a>: Iterator> + 'a {} -impl<'a, T: Iterator> + 'a> VirtualWriteIterator<'a> for T {} - -impl<'a> From> for (Address, &'a [u8]) { - fn from(VirtualWriteData(a, b): VirtualWriteData<'a>) -> Self { - (a, b) - } -} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_mem/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_mem/mod.rs new file mode 100644 index 0000000..eef296b --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_mem/mod.rs @@ -0,0 +1,4 @@ +pub mod virtual_dma; + +#[doc(hidden)] +pub use virtual_dma::VirtualDma; diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_mem/virtual_dma.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_mem/virtual_dma.rs index fbfa4a7..cf93c99 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/virt_mem/virtual_dma.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_mem/virtual_dma.rs @@ -1,23 +1,28 @@ use std::prelude::v1::*; -use super::{VirtualReadData, VirtualWriteData}; -use crate::architecture::{ArchitectureObj, ScopedVirtualTranslate}; -use crate::error::{Error, PartialError, PartialResult, Result}; -use crate::iter::FnExtend; +use crate::architecture::{ArchitectureObj, Endianess}; +use crate::error::{Error, Result, *}; +use crate::mem::memory_view::*; use crate::mem::{ - virt_translate::{DirectTranslate, 
VirtualTranslate}, - PhysicalMemory, PhysicalReadData, PhysicalWriteData, VirtualMemory, + mem_data::*, + virt_translate::{ + DirectTranslate, VirtualTranslate, VirtualTranslate2, VirtualTranslate3, + VirtualTranslation, VirtualTranslationCallback, VirtualTranslationFail, + VirtualTranslationFailCallback, + }, + MemoryView, PhysicalMemory, PhysicalMemoryMetadata, }; -use crate::types::{Address, Page, PhysicalAddress}; +use crate::types::{umem, Address, PhysicalAddress}; +use cglue::tuple::*; use bumpalo::{collections::Vec as BumpVec, Bump}; -use itertools::Itertools; +use cglue::callback::FromExtend; -/// The `VirtualDMA` struct provides a default implementation to access virtual memory -/// from user provided `PhysicalMemory` and `VirtualTranslate` objects. +/// The VirtualDma struct provides a default implementation to access virtual memory +/// from user provided [`PhysicalMemory`] and [`VirtualTranslate2`] objects. /// -/// This struct implements `VirtualMemory` and allows the user to access the virtual memory of a process. -pub struct VirtualDMA { +/// This struct implements [`MemoryView`] and allows the user to access the virtual memory of a process. +pub struct VirtualDma { phys_mem: T, vat: V, proc_arch: ArchitectureObj, @@ -25,85 +30,91 @@ pub struct VirtualDMA { arena: Bump, } -impl VirtualDMA { - /// Constructs a `VirtualDMA` object from user supplied architectures and DTB. - /// It creates a default `VirtualTranslate` object using the `DirectTranslate` struct. +impl VirtualDma { + /// Constructs a `VirtualDma` object from user supplied architectures and DTB. + /// It creates a default `VirtualTranslate2` object using the `DirectTranslate` struct. /// /// If you want to use a cache for translating virtual to physical memory - /// consider using the `VirtualDMA::with_vat()` function and supply your own `VirtualTranslate` object. + /// consider using the `VirtualDma::with_vat()` function and supply your own `VirtualTranslate2` object. 
/// /// # Examples /// - /// Constructing a `VirtualDMA` object with a given dtb and using it to read: + /// Constructing a `VirtualDma` object with a given dtb and using it to read: /// ``` /// use memflow::types::Address; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, VirtualTranslate, VirtualMemory, VirtualDMA}; + /// use memflow::mem::{PhysicalMemory, VirtualTranslate2, MemoryView, VirtualDma}; + /// use memflow::cglue::Fwd; /// - /// fn read(phys_mem: &mut T, vat: &mut V, dtb: Address, read_addr: Address) { + /// fn read(phys_mem: Fwd<&mut impl PhysicalMemory>, vat: &mut impl VirtualTranslate2, dtb: Address, read_addr: Address) { /// let arch = x64::ARCH; /// let translator = x64::new_translator(dtb); /// - /// let mut virt_mem = VirtualDMA::new(phys_mem, arch, translator); + /// let mut virt_mem = VirtualDma::new(phys_mem, arch, translator); /// /// let mut addr = 0u64; - /// virt_mem.virt_read_into(read_addr, &mut addr).unwrap(); + /// virt_mem.read_into(read_addr, &mut addr).unwrap(); /// println!("addr: {:x}", addr); /// # assert_eq!(addr, 0x00ff_00ff_00ff_00ff); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::{DummyMemory, DummyOs}; /// # use memflow::types::size; /// # use memflow::mem::DirectTranslate; - /// # let (mut mem, dtb, virt_base) = DummyMemory::new_and_dtb(size::mb(4), size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); + /// # use memflow::cglue::ForwardMut; + /// # let mem = DummyMemory::new(size::mb(4)); + /// # let (mut os, dtb, virt_base) = DummyOs::new_and_dtb(mem, size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); /// # let mut vat = DirectTranslate::new(); - /// # read(&mut mem, &mut vat, dtb, virt_base); + /// # read(os.forward_mut(), &mut vat, dtb, virt_base); /// ``` - pub fn new(phys_mem: T, proc_arch: ArchitectureObj, translator: D) -> Self { + pub fn new(phys_mem: T, arch: impl Into, translator: D) -> Self { Self { phys_mem, vat: DirectTranslate::new(), - proc_arch, + 
proc_arch: arch.into(), translator, arena: Bump::new(), } } } -impl VirtualDMA { - /// This function constructs a `VirtualDMA` instance with a user supplied `VirtualTranslate` object. - /// It can be used when working with cached virtual to physical translations such as a TLB. +impl VirtualDma { + /// This function constructs a `VirtualDma` instance with a user supplied `VirtualTranslate2` object. + /// It can be used when working with cached virtual to physical translations such as a Tlb. /// /// # Examples /// - /// Constructing a `VirtualDMA` object with VAT and using it to read: + /// Constructing a `VirtualDma` object with VAT and using it to read: /// ``` /// use memflow::types::Address; /// use memflow::architecture::x86::x64; - /// use memflow::mem::{PhysicalMemory, VirtualTranslate, VirtualMemory, VirtualDMA}; + /// use memflow::mem::{PhysicalMemory, VirtualTranslate2, MemoryView, VirtualDma}; + /// use memflow::cglue::Fwd; /// - /// fn read(phys_mem: &mut T, vat: V, dtb: Address, read_addr: Address) { + /// fn read(phys_mem: Fwd<&mut impl PhysicalMemory>, vat: impl VirtualTranslate2, dtb: Address, read_addr: Address) { /// let arch = x64::ARCH; /// let translator = x64::new_translator(dtb); /// - /// let mut virt_mem = VirtualDMA::with_vat(phys_mem, arch, translator, vat); + /// let mut virt_mem = VirtualDma::with_vat(phys_mem, arch, translator, vat); /// /// let mut addr = 0u64; - /// virt_mem.virt_read_into(read_addr, &mut addr).unwrap(); + /// virt_mem.read_into(read_addr, &mut addr).unwrap(); /// println!("addr: {:x}", addr); /// # assert_eq!(addr, 0x00ff_00ff_00ff_00ff); /// } - /// # use memflow::mem::dummy::DummyMemory; + /// # use memflow::dummy::{DummyMemory, DummyOs}; /// # use memflow::types::size; /// # use memflow::mem::DirectTranslate; - /// # let (mut mem, dtb, virt_base) = DummyMemory::new_and_dtb(size::mb(4), size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); + /// # use memflow::cglue::ForwardMut; + /// # let mem = 
DummyMemory::new(size::mb(4)); + /// # let (mut os, dtb, virt_base) = DummyOs::new_and_dtb(mem, size::mb(2), &[255, 0, 255, 0, 255, 0, 255, 0]); /// # let mut vat = DirectTranslate::new(); - /// # read(&mut mem, &mut vat, dtb, virt_base); + /// # read(os.forward_mut(), &mut vat, dtb, virt_base); /// ``` - pub fn with_vat(phys_mem: T, proc_arch: ArchitectureObj, translator: D, vat: V) -> Self { + pub fn with_vat(phys_mem: T, arch: impl Into, translator: D, vat: V) -> Self { Self { phys_mem, vat, - proc_arch, + proc_arch: arch.into(), translator, arena: Bump::new(), } @@ -119,27 +130,57 @@ impl VirtualD self.proc_arch } - /// Returns the Directory Table Base of this process. - pub fn translator(&self) -> &impl ScopedVirtualTranslate { + /// Replaces current process architecture with a new one. + pub fn set_proc_arch(&mut self, new_arch: ArchitectureObj) -> ArchitectureObj { + core::mem::replace(&mut self.proc_arch, new_arch) + } + + /// Returns the Directory Table Base of this process.. + pub fn translator(&self) -> &D { &self.translator } - /// A wrapper around `virt_read_addr64` and `virt_read_addr32` that will use the pointer size of this context's process. - pub fn virt_read_addr(&mut self, addr: Address) -> PartialResult
{ + /// Replace current translator with a new one. + pub fn set_translator(&mut self, new_translator: D) -> D { + core::mem::replace(&mut self.translator, new_translator) + } + + /// A wrapper around `read_addr64` and `read_addr32` that will use the pointer size of this context's process. + /// TODO: do this in virt mem + pub fn read_addr(&mut self, addr: Address) -> PartialResult
{ match self.proc_arch.bits() { - 64 => self.virt_read_addr64(addr), - 32 => self.virt_read_addr32(addr), - _ => Err(PartialError::Error(Error::InvalidArchitecture)), + 64 => self.read_addr64(addr), + 32 => self.read_addr32(addr), + _ => Err(PartialError::Error(Error( + ErrorOrigin::VirtualMemory, + ErrorKind::InvalidArchitecture, + ))), } } - /// Consume the self object and returns the containing memory connection - pub fn destroy(self) -> T { - self.phys_mem + /// Consumes this VirtualDma object, returning the underlying memory and vat objects + pub fn into_inner(self) -> (T, V) { + (self.phys_mem, self.vat) + } + + pub fn mem_vat_pair(&mut self) -> (&mut T, &mut V) { + (&mut self.phys_mem, &mut self.vat) + } + + pub fn phys_mem(&mut self) -> &mut T { + &mut self.phys_mem + } + + pub fn phys_mem_ref(&self) -> &T { + &self.phys_mem + } + + pub fn vat(&mut self) -> &mut V { + &mut self.vat } } -impl Clone for VirtualDMA +impl Clone for VirtualDma where T: Clone, V: Clone, @@ -156,124 +197,113 @@ where } } -impl VirtualMemory - for VirtualDMA +#[allow(clippy::needless_option_as_deref)] +impl MemoryView + for VirtualDma { - fn virt_read_raw_list(&mut self, data: &mut [VirtualReadData]) -> PartialResult<()> { + fn read_raw_iter<'a>( + &mut self, + MemOps { + inp, + out, + mut out_fail, + }: ReadRawMemOps, + ) -> Result<()> { self.arena.reset(); - let mut translation = BumpVec::with_capacity_in(data.len(), &self.arena); - let mut partial_read = false; - self.vat.virt_to_phys_iter( - &mut self.phys_mem, - &self.translator, - data.iter_mut() - .map(|VirtualReadData(a, b)| (*a, &mut b[..])), - &mut FnExtend::new(|(a, b)| translation.push(PhysicalReadData(a, b))), - &mut FnExtend::new(|(_, _, out): (_, _, &mut [u8])| { - for v in out.iter_mut() { - *v = 0; - } - partial_read = true; - }), - ); - - self.phys_mem.phys_read_raw_list(&mut translation)?; - if !partial_read { - Ok(()) - } else { - Err(PartialError::PartialVirtualRead(())) - } - } - - fn virt_write_raw_list(&mut 
self, data: &[VirtualWriteData]) -> PartialResult<()> { - self.arena.reset(); - let mut translation = BumpVec::with_capacity_in(data.len(), &self.arena); + let mut translation = BumpVec::with_capacity_in(inp.size_hint().0, &self.arena); + let phys_mem = &mut self.phys_mem; - let mut partial_read = false; self.vat.virt_to_phys_iter( - &mut self.phys_mem, + phys_mem, &self.translator, - data.iter().copied().map(<_>::into), - &mut FnExtend::new(|(a, b)| translation.push(PhysicalWriteData(a, b))), - &mut FnExtend::new(|(_, _, _): (_, _, _)| { - partial_read = true; - }), + inp, + &mut translation.from_extend(), + &mut (&mut |(_, CTup3(_, meta, buf)): (_, _)| { + opt_call(out_fail.as_deref_mut(), CTup2(meta, buf)) + }) + .into(), ); - self.phys_mem.phys_write_raw_list(&translation)?; - if !partial_read { - Ok(()) - } else { - Err(PartialError::PartialVirtualRead(())) - } - } - - fn virt_page_info(&mut self, addr: Address) -> Result { - let paddr = self - .vat - .virt_to_phys(&mut self.phys_mem, &self.translator, addr)?; - Ok(paddr.containing_page()) + MemOps::with_raw(translation.into_iter(), out, out_fail, |data| { + phys_mem.phys_read_raw_iter(data) + }) } - fn virt_translation_map_range( + fn write_raw_iter( &mut self, - start: Address, - end: Address, - ) -> Vec<(Address, usize, PhysicalAddress)> { + MemOps { + inp, + out, + mut out_fail, + }: WriteRawMemOps, + ) -> Result<()> { self.arena.reset(); - let mut out = BumpVec::new_in(&self.arena); + + let mut translation = BumpVec::with_capacity_in(inp.size_hint().0, &self.arena); + let phys_mem = &mut self.phys_mem; self.vat.virt_to_phys_iter( - &mut self.phys_mem, + phys_mem, &self.translator, - Some((start, (start, end - start))).into_iter(), - &mut out, - &mut FnExtend::void(), + inp, + &mut translation.from_extend(), + &mut (&mut |(_, CTup3(_, meta, buf)): (_, _)| { + opt_call(out_fail.as_deref_mut(), CTup2(meta, buf)) + }) + .into(), ); - out.sort_by(|(_, (a, _)), (_, (b, _))| a.cmp(b)); + 
MemOps::with_raw(translation.into_iter(), out, out_fail, |data| { + phys_mem.phys_write_raw_iter(data) + }) + } + + fn metadata(&self) -> MemoryViewMetadata { + let PhysicalMemoryMetadata { + max_address, + real_size, + readonly, + .. + } = self.phys_mem.metadata(); - out.into_iter() - .coalesce(|(ap, av), (bp, bv)| { - if bv.0 == (av.0 + av.1) && bp.address() == (ap.address() + av.1) { - Ok((ap, (av.0, bv.0 + bv.1 - av.0))) - } else { - Err(((ap, av), (bp, bv))) - } - }) - .map(|(p, (v, s))| (v, s, p)) - .collect() + MemoryViewMetadata { + max_address, + real_size, + readonly, + little_endian: self.proc_arch.endianess() == Endianess::LittleEndian, + arch_bits: self.proc_arch.bits(), + } } +} - fn virt_page_map_range( +impl VirtualTranslate + for VirtualDma +{ + fn virt_to_phys_list( &mut self, - gap_length: usize, - start: Address, - end: Address, - ) -> Vec<(Address, usize)> { - self.arena.reset(); - let mut out = BumpVec::new_in(&self.arena); - + addrs: &[VtopRange], + mut out: VirtualTranslationCallback, + mut out_fail: VirtualTranslationFailCallback, + ) { self.vat.virt_to_phys_iter( &mut self.phys_mem, &self.translator, - Some((start, (start, end - start))).into_iter(), - &mut out, - &mut FnExtend::void(), - ); - - out.sort_by(|(_, (a, _)), (_, (b, _))| a.cmp(b)); - - out.into_iter() - .map(|(_, a)| a) - .coalesce(|a, b| { - if b.0 - (a.0 + a.1) <= gap_length { - Ok((a.0, b.0 + b.1 - a.0)) - } else { - Err((a, b)) - } + addrs + .iter() + .map(|&CTup2(address, size)| CTup3(address, address, size)), + &mut (&mut |CTup3(a, b, c): CTup3| { + out.call(VirtualTranslation { + in_virtual: b, + size: c, + out_physical: a, + }) + }) + .into(), + &mut (&mut |(_e, CTup3(from, _, size))| { + out_fail.call(VirtualTranslationFail { from, size }) }) - .collect() + .into(), + ) } } diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_mem_batcher.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_mem_batcher.rs deleted file mode 100644 index 512ee9a..0000000 --- 
a/apex_dma/memflow_lib/memflow/src/mem/virt_mem_batcher.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::prelude::v1::*; - -use crate::error::PartialResult; -use crate::mem::virt_mem::{ - VirtualMemory, VirtualReadData, VirtualReadIterator, VirtualWriteData, VirtualWriteIterator, -}; -use crate::types::Address; - -use dataview::Pod; - -pub struct VirtualMemoryBatcher<'a, T: VirtualMemory> { - vmem: &'a mut T, - read_list: Vec>, - write_list: Vec>, -} - -impl<'a, T: VirtualMemory> VirtualMemoryBatcher<'a, T> { - pub fn new(vmem: &'a mut T) -> Self { - Self { - vmem, - read_list: vec![], - write_list: vec![], - } - } - - pub fn commit_rw(&mut self) -> PartialResult<()> { - if !self.read_list.is_empty() { - self.vmem.virt_read_raw_list(&mut self.read_list)?; - self.read_list.clear(); - } - - if !self.write_list.is_empty() { - self.vmem.virt_write_raw_list(&self.write_list)?; - self.write_list.clear(); - } - - Ok(()) - } - - pub fn read_raw_iter>(&mut self, iter: VI) -> &mut Self { - self.read_list.extend(iter); - self - } - - pub fn write_raw_iter>(&mut self, iter: VI) -> &mut Self { - self.write_list.extend(iter); - self - } - - // read helpers - pub fn read_raw_into<'b: 'a>(&mut self, addr: Address, out: &'b mut [u8]) -> &mut Self { - self.read_raw_iter(Some(VirtualReadData(addr, out)).into_iter()) - } - - pub fn read_into<'b: 'a, F: Pod + ?Sized>( - &mut self, - addr: Address, - out: &'b mut F, - ) -> &mut Self { - self.read_raw_into(addr, out.as_bytes_mut()) - } - - // write helpers - pub fn write_raw_into<'b: 'a>(&mut self, addr: Address, out: &'b [u8]) -> &mut Self { - self.write_raw_iter(Some(VirtualWriteData(addr, out)).into_iter()) - } - - pub fn write_into<'b: 'a, F: Pod + ?Sized>(&mut self, addr: Address, out: &'b F) -> &mut Self { - self.write_raw_into(addr, out.as_bytes()) - } -} - -impl<'a, T: VirtualMemory> Drop for VirtualMemoryBatcher<'a, T> { - fn drop(&mut self) { - let _ = self.commit_rw(); - } -} diff --git 
a/apex_dma/memflow_lib/memflow/src/mem/virt_translate.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate.rs deleted file mode 100644 index 3b0ff88..0000000 --- a/apex_dma/memflow_lib/memflow/src/mem/virt_translate.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::prelude::v1::*; - -pub mod direct_translate; -use crate::iter::SplitAtIndex; -pub use direct_translate::DirectTranslate; - -#[cfg(test)] -mod tests; - -use crate::error::{Error, Result}; - -use crate::mem::PhysicalMemory; -use crate::types::{Address, PhysicalAddress}; - -use crate::architecture::ScopedVirtualTranslate; - -pub trait VirtualTranslate -where - Self: Send, -{ - /// This function will do a virtual to physical memory translation for the - /// `ScopedVirtualTranslate` over multiple elements. - /// - /// In most cases, you will want to use the `VirtualDMA`, but this trait is provided if needed - /// to implement some more advanced filtering. - /// - /// # Examples - /// - /// ``` - /// # use memflow::error::Result; - /// # use memflow::types::{PhysicalAddress, Address}; - /// # use memflow::mem::dummy::DummyMemory; - /// use memflow::types::size; - /// use memflow::architecture::x86::x64; - /// use memflow::iter::FnExtend; - /// use memflow::mem::{VirtualTranslate, DirectTranslate}; - /// - /// # const VIRT_MEM_SIZE: usize = size::mb(8); - /// # const CHUNK_SIZE: usize = 2; - /// # - /// # let mut mem = DummyMemory::new(size::mb(16)); - /// # let (dtb, virtual_base) = mem.alloc_dtb(VIRT_MEM_SIZE, &[]); - /// # let translator = x64::new_translator(dtb); - /// let arch = x64::ARCH; - /// - /// let mut buffer = vec![0; VIRT_MEM_SIZE * CHUNK_SIZE / arch.page_size()]; - /// let buffer_length = buffer.len(); - /// - /// // In this example, 8 megabytes starting from `virtual_base` are mapped in. - /// // We translate 2 bytes chunks over the page boundaries. These bytes will be - /// // split off into 2 separate translated chunks. 
- /// let addresses = buffer - /// .chunks_mut(CHUNK_SIZE) - /// .enumerate() - /// .map(|(i, buf)| (virtual_base + ((i + 1) * size::kb(4) - 1), buf)); - /// - /// let mut translated_data = vec![]; - /// let mut failed_translations = FnExtend::void(); - /// - /// let mut direct_translate = DirectTranslate::new(); - /// - /// direct_translate.virt_to_phys_iter( - /// &mut mem, - /// &translator, - /// addresses, - /// &mut translated_data, - /// &mut failed_translations, - /// ); - /// - /// - /// // We tried to translate one byte out of the mapped memory, it had to fail - /// assert_eq!(translated_data.len(), buffer_length - 1); - /// - /// # Ok::<(), memflow::error::Error>(()) - /// ``` - fn virt_to_phys_iter( - &mut self, - phys_mem: &mut T, - translator: &D, - addrs: VI, - out: &mut VO, - out_fail: &mut FO, - ) where - T: PhysicalMemory + ?Sized, - B: SplitAtIndex, - D: ScopedVirtualTranslate, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>; - - // helpers - fn virt_to_phys( - &mut self, - phys_mem: &mut T, - translator: &D, - vaddr: Address, - ) -> Result { - let mut vec = vec![]; //Vec::new_in(&arena); - let mut vec_fail = vec![]; //BumpVec::new_in(&arena); - self.virt_to_phys_iter( - phys_mem, - translator, - Some((vaddr, 1)).into_iter(), - &mut vec, - &mut vec_fail, - ); - if let Some(ret) = vec.pop() { - Ok(ret.0) - } else { - Err(vec_fail.pop().unwrap().0) - } - } -} - -// forward impls -impl<'a, T, P> VirtualTranslate for P -where - T: VirtualTranslate + ?Sized, - P: std::ops::DerefMut + Send, -{ - #[inline] - fn virt_to_phys_iter( - &mut self, - phys_mem: &mut U, - translator: &D, - addrs: VI, - out: &mut VO, - out_fail: &mut FO, - ) where - U: PhysicalMemory + ?Sized, - B: SplitAtIndex, - D: ScopedVirtualTranslate, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, - { - (**self).virt_to_phys_iter(phys_mem, translator, addrs, out, out_fail) - } -} diff --git 
a/apex_dma/memflow_lib/memflow/src/mem/cache/cached_vat.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/mod.rs similarity index 58% rename from apex_dma/memflow_lib/memflow/src/mem/cache/cached_vat.rs rename to apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/mod.rs index 99fdd26..44407f0 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/cached_vat.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/mod.rs @@ -1,12 +1,19 @@ -use crate::error::{Error, Result}; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; -use super::tlb_cache::TLBCache; -use crate::architecture::{ArchitectureObj, ScopedVirtualTranslate}; +mod tlb_cache; + +use crate::architecture::ArchitectureObj; use crate::iter::{PageChunks, SplitAtIndex}; -use crate::mem::cache::{CacheValidator, DefaultCacheValidator}; -use crate::mem::virt_translate::VirtualTranslate; +use crate::mem::virt_translate::VirtualTranslate2; use crate::mem::PhysicalMemory; -use crate::types::{Address, PhysicalAddress}; +use crate::types::cache::{CacheValidator, DefaultCacheValidator}; +use crate::types::{umem, Address}; +use cglue::tuple::*; +use tlb_cache::TlbCache; + +use super::{VirtualTranslate3, VtopFailureCallback, VtopOutputCallback}; + +use cglue::callback::FromExtend; use bumpalo::{collections::Vec as BumpVec, Bump}; @@ -21,14 +28,16 @@ use bumpalo::{collections::Vec as BumpVec, Bump}; /// /// /// ``` -/// use memflow::mem::cache::CachedVirtualTranslate; +/// use memflow::mem::CachedVirtualTranslate; /// # use memflow::architecture::x86::x64; -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::mem::{DirectTranslate, VirtualDMA, VirtualMemory, VirtualTranslate}; +/// # use memflow::dummy::{DummyMemory, DummyOs}; +/// # use memflow::mem::{DirectTranslate, VirtualDma, MemoryView, VirtualTranslate2}; /// # use memflow::types::size; -/// # let mut mem = DummyMemory::new(size::mb(32)); +/// # let mem = DummyMemory::new(size::mb(32)); +/// # let mut os = 
DummyOs::new(mem); /// # let virt_size = size::mb(8); -/// # let (dtb, virt_base) = mem.alloc_dtb(virt_size, &[]); +/// # let (dtb, virt_base) = os.alloc_dtb(virt_size, &[]); +/// # let mut mem = os.into_inner(); /// # let translator = x64::new_translator(dtb); /// # let mut vat = DirectTranslate::new(); /// let mut cached_vat = CachedVirtualTranslate::builder(&mut vat) @@ -37,18 +46,20 @@ use bumpalo::{collections::Vec as BumpVec, Bump}; /// .unwrap(); /// ``` /// -/// Testing that cached translation is 4x faster than uncached translation when having a cache hit: +/// Testing that cached translation is at least 2x faster than uncached translation when having a cache hit: /// /// ``` /// use std::time::{Duration, Instant}; -/// # use memflow::mem::cache::CachedVirtualTranslate; +/// # use memflow::mem::CachedVirtualTranslate; /// # use memflow::architecture::x86::x64; -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::mem::{DirectTranslate, VirtualDMA, VirtualMemory, VirtualTranslate}; +/// # use memflow::dummy::{DummyMemory, DummyOs}; +/// # use memflow::mem::{DirectTranslate, VirtualDma, MemoryView, VirtualTranslate2}; /// # use memflow::types::size; -/// # let mut mem = DummyMemory::new(size::mb(32)); +/// # let mem = DummyMemory::new(size::mb(32)); +/// # let mut os = DummyOs::new(mem); /// # let virt_size = size::mb(8); -/// # let (dtb, virt_base) = mem.alloc_dtb(virt_size, &[]); +/// # let (dtb, virt_base) = os.alloc_dtb(virt_size, &[]); +/// # let mut mem = os.into_inner(); /// # let translator = x64::new_translator(dtb); /// # let mut vat = DirectTranslate::new(); /// # let mut cached_vat = CachedVirtualTranslate::builder(&mut vat) @@ -58,7 +69,7 @@ use bumpalo::{collections::Vec as BumpVec, Bump}; /// /// let translation_address = virt_base; /// -/// let iter_count = 512; +/// let iter_count = 1024; /// /// let avg_cached = (0..iter_count).map(|_| { /// let timer = Instant::now(); @@ -84,19 +95,19 @@ use bumpalo::{collections::Vec as BumpVec, 
Bump}; /// /// println!("{:?}", avg_uncached); /// -/// assert!(avg_cached * 4 <= avg_uncached); +/// assert!(avg_cached * 9 <= avg_uncached * 7); /// ``` pub struct CachedVirtualTranslate { vat: V, - tlb: TLBCache, + tlb: TlbCache, arch: ArchitectureObj, arena: Bump, - pub hitc: usize, - pub misc: usize, + pub hitc: umem, + pub misc: umem, } -impl CachedVirtualTranslate { - pub fn new(vat: V, tlb: TLBCache, arch: ArchitectureObj) -> Self { +impl CachedVirtualTranslate { + pub fn new(vat: V, tlb: TlbCache, arch: ArchitectureObj) -> Self { Self { vat, tlb, @@ -108,13 +119,13 @@ impl CachedVirtualTranslate { } } -impl CachedVirtualTranslate { +impl CachedVirtualTranslate { pub fn builder(vat: V) -> CachedVirtualTranslateBuilder { CachedVirtualTranslateBuilder::new(vat) } } -impl Clone +impl Clone for CachedVirtualTranslate { fn clone(&self) -> Self { @@ -129,21 +140,19 @@ impl Clone } } -impl VirtualTranslate for CachedVirtualTranslate { - fn virt_to_phys_iter( +impl VirtualTranslate2 for CachedVirtualTranslate { + fn virt_to_phys_iter( &mut self, phys_mem: &mut T, translator: &D, addrs: VI, - out: &mut VO, - out_fail: &mut FO, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, ) where T: PhysicalMemory + ?Sized, B: SplitAtIndex, - D: ScopedVirtualTranslate, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, + D: VirtualTranslate3, + VI: Iterator>, { self.tlb.validator.update_validity(); self.arena.reset(); @@ -159,33 +168,34 @@ impl VirtualTranslate for CachedVirtualT let arch = self.arch; let mut addrs = addrs - .filter_map(|(addr, buf)| { - if tlb.is_read_too_long(arch, buf.length()) { - uncached_in.push((addr, buf)); + .filter_map(|CTup3(addr, meta_addr, buf)| { + if tlb.is_read_too_long(arch, buf.length() as umem) { + uncached_in.push(CTup3(addr, meta_addr, buf)); None } else { - Some((addr, buf)) + Some((addr, meta_addr, buf)) } }) - .flat_map(|(addr, buf)| { - buf.page_chunks_by(addr, arch.page_size(), 
|addr, split, _| { + .flat_map(|(addr, meta_addr, buf)| { + (meta_addr, buf).page_chunks_by(addr, arch.page_size(), |addr, (_, split), _| { tlb.try_entry(translator, addr + split.length(), arch) .is_some() || tlb.try_entry(translator, addr, arch).is_some() }) }) - .filter_map(|(addr, buf)| { + .filter_map(|(addr, (meta_addr, buf))| { if let Some(entry) = tlb.try_entry(translator, addr, arch) { hitc += 1; - debug_assert!(buf.length() <= arch.page_size()); - match entry { - Ok(entry) => out.extend(Some((entry.phys_addr, buf))), - Err(error) => out_fail.extend(Some((error, addr, buf))), - } + debug_assert!(buf.length() <= arch.page_size() as umem); + // TODO: handle case + let _ = match entry { + Ok(entry) => out.call(CTup3(entry.phys_addr, meta_addr, buf)), + Err(error) => out_fail.call((error, CTup3(addr, meta_addr, buf))), + }; None } else { - misc += core::cmp::max(1, buf.length() / arch.page_size()); - Some((addr, (addr, buf))) + misc += core::cmp::max(1, buf.length() / arch.page_size() as umem); + Some(CTup3(addr, meta_addr, (addr, buf))) } }) .peekable(); @@ -195,8 +205,8 @@ impl VirtualTranslate for CachedVirtualT phys_mem, translator, addrs, - &mut uncached_out, - &mut uncached_out_fail, + &mut uncached_out.from_extend(), + &mut uncached_out_fail.from_extend(), ); } @@ -206,15 +216,21 @@ impl VirtualTranslate for CachedVirtualT vat.virt_to_phys_iter(phys_mem, translator, uncached_iter, out, out_fail); } - out.extend(uncached_out.into_iter().map(|(paddr, (addr, buf))| { - tlb.cache_entry(translator, addr, paddr, arch); - (paddr, buf) - })); - - out_fail.extend(uncached_out_fail.into_iter().map(|(err, vaddr, (_, buf))| { - tlb.cache_invalid_if_uncached(translator, vaddr, buf.length(), arch); - (err, vaddr, buf) - })); + out.extend( + uncached_out + .into_iter() + .map(|CTup3(paddr, meta_addr, (addr, buf))| { + tlb.cache_entry(translator, addr, paddr, arch); + CTup3(paddr, meta_addr, buf) + }), + ); + + out_fail.extend(uncached_out_fail.into_iter().map( + |(err, 
CTup3(vaddr, meta_addr, (_, buf)))| { + tlb.cache_invalid_if_uncached(translator, vaddr, buf.length() as umem, arch); + (err, CTup3(vaddr, meta_addr, buf)) + }, + )); self.hitc += hitc; self.misc += misc; @@ -228,7 +244,7 @@ pub struct CachedVirtualTranslateBuilder { arch: Option, } -impl CachedVirtualTranslateBuilder { +impl CachedVirtualTranslateBuilder { fn new(vat: V) -> Self { Self { vat, @@ -239,15 +255,21 @@ impl CachedVirtualTranslateBuilder CachedVirtualTranslateBuilder { +impl CachedVirtualTranslateBuilder { pub fn build(self) -> Result> { Ok(CachedVirtualTranslate::new( self.vat, - TLBCache::new( - self.entries.ok_or("entries must be initialized")?, + TlbCache::new( + self.entries.ok_or_else(|| { + Error(ErrorOrigin::Cache, ErrorKind::Uninitialized) + .log_error("entries must be initialized") + })?, self.validator, ), - self.arch.ok_or("arch must be initialized")?, + self.arch.ok_or_else(|| { + Error(ErrorOrigin::Cache, ErrorKind::Uninitialized) + .log_error("arch must be initialized") + })?, )) } @@ -268,34 +290,35 @@ impl CachedVirtualTranslateBuilder self } - pub fn arch(mut self, arch: ArchitectureObj) -> Self { - self.arch = Some(arch); + pub fn arch(mut self, arch: impl Into) -> Self { + self.arch = Some(arch.into()); self } } #[cfg(test)] mod tests { + use super::*; use crate::architecture::x86; - + use crate::dummy::{DummyMemory, DummyOs}; use crate::error::PartialResultExt; - use crate::mem::cache::cached_vat::CachedVirtualTranslate; - use crate::mem::cache::timed_validator::TimedCacheValidator; - use crate::mem::{dummy::DummyMemory, DirectTranslate, PhysicalMemory}; - use crate::mem::{VirtualDMA, VirtualMemory}; + use crate::mem::{DirectTranslate, PhysicalMemory}; + use crate::mem::{MemoryView, VirtualDma}; + use crate::types::cache::timed_validator::TimedCacheValidator; use crate::types::{size, Address}; + use coarsetime::Duration; fn build_mem( buf: &[u8], ) -> ( impl PhysicalMemory, - impl VirtualMemory + Clone, + impl MemoryView + Clone, 
Address, Address, ) { - let (mem, dtb, virt_base) = - DummyMemory::new_and_dtb(buf.len() + size::mb(2), buf.len(), buf); + let mem = DummyMemory::new(buf.len() + size::mb(2)); + let (os, dtb, virt_base) = DummyOs::new_and_dtb(mem, buf.len(), buf); let translator = x86::x64::new_translator(dtb); let vat = CachedVirtualTranslate::builder(DirectTranslate::new()) @@ -304,7 +327,10 @@ mod tests { .entries(2048) .build() .unwrap(); - let vmem = VirtualDMA::with_vat(mem.clone(), x86::x64::ARCH, translator, vat); + + let mem = os.into_inner(); + + let vmem = VirtualDma::with_vat(mem.clone(), x86::x64::ARCH, translator, vat); (mem, vmem, virt_base, dtb) } @@ -312,7 +338,7 @@ mod tests { fn standard_buffer(size: usize) -> Vec { (0..size) .step_by(std::mem::size_of_val(&size)) - .flat_map(|v| v.to_le_bytes().iter().copied().collect::>()) + .flat_map(|v| v.to_le_bytes().to_vec()) .collect() } @@ -324,17 +350,19 @@ mod tests { let buffer = standard_buffer(size::mb(2)); let (mut mem, mut vmem, virt_base, dtb) = build_mem(&buffer); - let mut read_into = vec![0; size::mb(2)]; - vmem.virt_read_raw_into(virt_base, &mut read_into) + let mut read_into = vec![0u8; size::mb(2)]; + + vmem.read_raw_into(virt_base, &mut read_into) .data() .unwrap(); + assert!(read_into == buffer); // Destroy the page tables - mem.phys_write_raw(dtb.into(), &vec![0; size::kb(4)]) + mem.phys_write(dtb.into(), vec![0u8; size::kb(4)].as_slice()) .unwrap(); - vmem.virt_read_raw_into(virt_base, &mut read_into) + vmem.read_raw_into(virt_base, &mut read_into) .data() .unwrap(); assert!(read_into == buffer); @@ -343,12 +371,12 @@ mod tests { let mut vmem_cloned = vmem.clone(); vmem_cloned - .virt_read_raw_into(virt_base, &mut read_into) + .read_raw_into(virt_base, &mut read_into) .data() .unwrap(); assert!(read_into == buffer); - vmem.virt_read_raw_into(virt_base, &mut read_into) + vmem.read_raw_into(virt_base, &mut read_into) .data() .unwrap(); assert!(read_into == buffer); diff --git 
a/apex_dma/memflow_lib/memflow/src/mem/cache/tlb_cache.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/tlb_cache.rs similarity index 77% rename from apex_dma/memflow_lib/memflow/src/mem/cache/tlb_cache.rs rename to apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/tlb_cache.rs index 6b96cac..36928a6 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/tlb_cache.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/cache/tlb_cache.rs @@ -1,18 +1,18 @@ use std::prelude::v1::*; -use super::CacheValidator; -use crate::architecture::{ArchitectureObj, ScopedVirtualTranslate}; -use crate::error::{Error, Result}; -use crate::types::{Address, PhysicalAddress}; +use super::VirtualTranslate3; +use crate::architecture::ArchitectureObj; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; +use crate::types::{cache::CacheValidator, umem, Address, PhysicalAddress}; #[derive(Clone, Copy)] -pub struct TLBEntry { - pub pt_index: usize, +pub struct TlbEntry { + pub pt_index: umem, pub virt_addr: Address, pub phys_addr: PhysicalAddress, } -impl TLBEntry { +impl TlbEntry { pub const fn create_invalid() -> Self { Self { pt_index: !0, @@ -24,7 +24,7 @@ impl TLBEntry { #[derive(Clone, Copy)] pub struct CachedEntry { - pt_index: usize, + pt_index: umem, virt_page: Address, phys_page: PhysicalAddress, } @@ -38,12 +38,12 @@ impl CachedEntry { } #[derive(Clone)] -pub struct TLBCache { +pub struct TlbCache { entries: Box<[CachedEntry]>, pub validator: T, } -impl TLBCache { +impl TlbCache { pub fn new(size: usize, mut validator: T) -> Self { validator.allocate_slots(size); @@ -55,21 +55,21 @@ impl TLBCache { #[inline] fn get_cache_index(&self, page_addr: Address, page_size: usize) -> usize { - ((page_addr.as_u64() / (page_size as u64)) % (self.entries.len() as u64)) as usize + ((page_addr.to_umem() / page_size as umem) % (self.entries.len() as umem)) as usize } #[inline] - pub fn is_read_too_long(&self, arch: ArchitectureObj, size: usize) -> bool { - size 
/ arch.page_size() > self.entries.len() + pub fn is_read_too_long(&self, arch: ArchitectureObj, size: umem) -> bool { + size / arch.page_size() as umem > self.entries.len() as umem } #[inline] - pub fn try_entry( + pub fn try_entry( &self, translator: &D, addr: Address, arch: ArchitectureObj, - ) -> Option> { + ) -> Option> { let pt_index = translator.translation_table_id(addr); let page_size = arch.page_size(); let page_address = addr.as_page_aligned(page_size); @@ -80,7 +80,7 @@ impl TLBCache { && self.validator.is_slot_valid(idx) { if entry.phys_page.is_valid() && entry.phys_page.has_page() { - Some(Ok(TLBEntry { + Some(Ok(TlbEntry { pt_index, virt_addr: addr, // TODO: this should be aware of huge pages @@ -88,11 +88,11 @@ impl TLBCache { entry.phys_page.address().as_page_aligned(page_size) + (addr - page_address), entry.phys_page.page_type(), - page_size, + page_size as umem, ), })) } else { - Some(Err(Error::VirtualTranslate)) + Some(Err(Error(ErrorOrigin::TlbCache, ErrorKind::NotFound))) } } else { None @@ -100,7 +100,7 @@ impl TLBCache { } #[inline] - pub fn cache_entry( + pub fn cache_entry( &mut self, translator: &D, in_addr: Address, @@ -119,19 +119,19 @@ impl TLBCache { } #[inline] - pub fn cache_invalid_if_uncached( + pub fn cache_invalid_if_uncached( &mut self, translator: &D, in_addr: Address, - invalid_len: usize, + invalid_len: umem, arch: ArchitectureObj, ) { let pt_index = translator.translation_table_id(in_addr); let page_size = arch.page_size(); let page_addr = in_addr.as_page_aligned(page_size); - let end_addr = (in_addr + invalid_len + 1).as_page_aligned(page_size); + let end_addr = (in_addr + invalid_len + 1_usize).as_page_aligned(page_size); - for i in (page_addr.as_u64()..end_addr.as_u64()) + for i in (page_addr.to_umem()..end_addr.to_umem()) .step_by(page_size) .take(self.entries.len()) { diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/direct_translate.rs 
b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/direct_translate.rs index 4872528..01ec50a 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/direct_translate.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/direct_translate.rs @@ -1,50 +1,50 @@ -use super::VirtualTranslate; -use crate::architecture::ScopedVirtualTranslate; -use crate::error::Error; +use super::{VirtualTranslate2, VirtualTranslate3, VtopFailureCallback, VtopOutputCallback}; use crate::iter::SplitAtIndex; use crate::mem::PhysicalMemory; -use crate::types::{Address, PhysicalAddress}; -use bumpalo::Bump; +use crate::types::{size, Address}; +use cglue::tuple::*; +use std::prelude::v1::*; /* -The `DirectTranslate` struct provides a default implementation for `VirtualTranslate` for physical memory. +The `DirectTranslate` struct provides a default implementation for `VirtualTranslate2` for physical memory. */ #[derive(Debug, Default)] pub struct DirectTranslate { - arena: Bump, + tmp_buf: Box<[std::mem::MaybeUninit]>, } impl DirectTranslate { pub fn new() -> Self { + Self::with_capacity(size::mb(64)) + } + + pub fn with_capacity(size: usize) -> Self { Self { - arena: Bump::with_capacity(0x4000), + tmp_buf: vec![std::mem::MaybeUninit::new(0); size].into_boxed_slice(), } } } impl Clone for DirectTranslate { fn clone(&self) -> Self { - Self::new() + Self::with_capacity(self.tmp_buf.len()) } } -impl VirtualTranslate for DirectTranslate { - fn virt_to_phys_iter( +impl VirtualTranslate2 for DirectTranslate { + fn virt_to_phys_iter( &mut self, phys_mem: &mut T, translator: &D, addrs: VI, - out: &mut VO, - out_fail: &mut FO, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, ) where T: PhysicalMemory + ?Sized, B: SplitAtIndex, - D: ScopedVirtualTranslate, - VI: Iterator, - VO: Extend<(PhysicalAddress, B)>, - FO: Extend<(Error, Address, B)>, + D: VirtualTranslate3, + VI: Iterator>, { - self.arena.reset(); - translator.virt_to_phys_iter(phys_mem, addrs, out, 
out_fail, &self.arena) + translator.virt_to_phys_iter(phys_mem, addrs, out, out_fail, &mut self.tmp_buf) } } diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/def.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/def.rs new file mode 100644 index 0000000..012336a --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/def.rs @@ -0,0 +1,162 @@ +use super::ArchMmuSpec; +use crate::architecture::Endianess; +use crate::types::{clamp_to_usize, umem, Address}; + +/// The `ArchMmuDef` structure defines how a real memory management unit should behave when +/// translating virtual memory addresses to physical ones. +/// +/// The core logic of virtual to physical memory translation is practically the same, but different +/// MMUs may have different address space sizes, and thus split the addresses in different ways. +/// +/// For instance, most x86_64 architectures have 4 levels of page mapping, providing 52-bit address +/// space. Virtual address gets split into 4 9-bit regions, and a 12-bit one, the first 4 are used +/// to index the page tables, and the last, 12-bit split is used as an offset to get the final +/// memory address. Meanwhile, x86 with PAE has 3 levels of page mapping, providing 36-bit address +/// space. Virtual address gets split into a 2-bit, 2 9-bit and a 12-bit regions - the last one is +/// also used as an offset from the physical frame. The difference is of level count, and virtual +/// address splits, but the core page table walk stays the same. +/// +/// Our virtual to physical memory ranslation code is the same for both architectures, in fact, it +/// is also the same for the x86 (non-PAE) architecture that has different PTE and pointer sizes. +/// All that differentiates the translation process is the data inside this structure. 
+#[derive(Debug)] +pub struct ArchMmuDef { + /// defines the way virtual addresses gets split (the last element + /// being the final physical page offset, and thus treated a bit differently) + pub virtual_address_splits: &'static [u8], + /// defines at which page mapping steps we can return a large page. + /// Steps are indexed from 0, and the list has to be sorted, otherwise the code may fail. + pub valid_final_page_steps: &'static [usize], + /// define the address space upper bound (32 for x86, 52 for x86_64) + pub address_space_bits: u8, + /// Defines the byte order of the architecture + pub endianess: Endianess, + /// native pointer size in bytes for the architecture. + pub addr_size: u8, + /// size of an individual page table entry in bytes. + pub pte_size: usize, + /// index of a bit in PTE defining whether the page is present or not. + pub present_bit: fn(Address) -> bool, + /// index of a bit in PTE defining if the page is writeable. + pub writeable_bit: fn(Address, bool) -> bool, + /// index of a bit in PTE defining if the page is non-executable. + pub nx_bit: fn(Address, bool) -> bool, + /// function for checking a bit in PTE to see if the PTE points to a large page. + pub large_page_bit: fn(Address) -> bool, +} + +impl ArchMmuDef { + pub const fn into_spec(self) -> ArchMmuSpec { + ArchMmuSpec::from_def(self) + } + + /// Mask a page table entry address to retrieve the next page table entry + /// + /// This function uses virtual_address_splits to mask the first bits out in `pte_addr`, but + /// keep everything else until the `address_space_bits` upper bound. + /// + /// # Arguments + /// + /// * `pte_addr` - page table entry address to mask + /// * `step` - the current step in the page walk + /// + /// # Remarks + /// + /// The final step is handled differently, because the final split provides a byte offset to + /// the page, instead of an offset that has to be multiplied by `pte_size`. 
We do that by + /// subtracting `pte_size` logarithm from the split size. + #[allow(unused)] + pub fn pte_addr_mask(&self, pte_addr: Address, step: usize) -> umem { + let max = self.address_space_bits - 1; + let min = self.virtual_address_splits[step] + + if step == self.virtual_address_splits.len() - 1 { + 0 + } else { + self.pte_size.to_le().trailing_zeros() as u8 + }; + let mask = Address::bit_mask(min..=max); + pte_addr.to_umem() & umem::from_le(mask.to_umem()) + } + + pub(crate) const fn virt_addr_bit_range(&self, step: usize) -> (u8, u8) { + let max_index_bits = { + let subsl = &self.virtual_address_splits; + let mut accum = 0; + let mut i = step; + while i < subsl.len() { + accum += subsl[i]; + i += 1; + } + accum + }; + let min_index_bits = max_index_bits - self.virtual_address_splits[step]; + (min_index_bits, max_index_bits) + } + + /// Return the number of splits of virtual addresses + /// + /// The returned value will be one more than the number of page table levels + #[allow(unused)] + pub fn split_count(&self) -> usize { + self.virtual_address_splits.len() + } + + /// Returns the upper bound of number of splits that can occur when performing translation + pub const fn spare_allocs(&self) -> usize { + let mut i = 1; + let mut fold = 0; + while i < self.virtual_address_splits.len() { + fold += 1 << self.virtual_address_splits[i - 1]; + i += 1; + } + fold + } + + /// Calculate the size of the page table entry leaf in bytes + /// + /// This will return the number of page table entries at a specific step multiplied by the + /// `pte_size`. Usually this will be an entire page, but in certain cases, like the highest + /// mapping level of x86 with PAE, it will be less. 
+ /// + /// # Arguments + /// + /// * `step` - the current step in the page walk + pub const fn pt_leaf_size(&self, step: usize) -> usize { + let (min, max) = self.virt_addr_bit_range(step); + clamp_to_usize((1 << (max - min)) * self.pte_size as umem) + } + + /// Get the page size of a specific step without checking if such page could exist + /// + /// # Arguments + /// + /// * `step` - the current step in the page walk + #[allow(unused)] + pub const fn page_size_step_unchecked(&self, step: usize) -> umem { + let max_index_bits = { + let subsl = &self.virtual_address_splits; + let mut i = step; + let mut accum = 0; + while i < subsl.len() { + accum += subsl[i]; + i += 1; + } + accum + }; + 1 << max_index_bits + } + + /// Get the page size of a specific page walk step + /// + /// This function is preferable to use externally, because in debug builds it will check if such + /// page could exist, and if can not, it will panic + /// + /// # Arguments + /// + /// * `step` - the current step in the page walk + #[allow(unused)] + pub fn page_size_step(&self, step: usize) -> umem { + debug_assert!(self.valid_final_page_steps.binary_search(&step).is_ok()); + self.page_size_step_unchecked(step) + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/mod.rs new file mode 100644 index 0000000..04ce759 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/mod.rs @@ -0,0 +1,90 @@ +pub(crate) mod def; +pub(crate) mod spec; +pub(crate) mod translate_data; + +use super::VtopFailureCallback; +use crate::iter::SplitAtIndex; +use crate::types::{umem, Address}; +use cglue::tuple::*; +pub(crate) use def::ArchMmuDef; +pub(crate) use fixed_slice_vec::FixedSliceVec as MVec; +pub(crate) use spec::ArchMmuSpec; +pub(crate) use translate_data::FlagsType; +use translate_data::{TranslateDataVec, TranslateVec, TranslationChunk}; + +pub trait MmuTranslationBase: Clone + Copy + 
core::fmt::Debug { + /// Retrieves page table address by virtual address + fn get_pt_by_virt_addr(&self, address: Address) -> Address; + + /// Retrieves page table address, and its index by index within + /// For instance, on Arm index 257 would return kernel page table + /// address, and index 1. On X86, however, this is a no-op that returns + /// underlying page table Address and `idx`. + fn get_pt_by_index(&self, idx: usize) -> (Address, usize); + + /// Retrieves number of page tables used by translation base. 1 on X86, + /// 1-2 on Arm (Win32 Arm merges both page tables) + fn pt_count(&self) -> usize; + + fn virt_addr_filter( + &self, + spec: &ArchMmuSpec, + addr: CTup3, + work_group: (&mut TranslationChunk, &mut TranslateDataVec), + out_fail: &mut VtopFailureCallback, + ); + + fn fill_init_chunk( + &self, + spec: &ArchMmuSpec, + out_fail: &mut VtopFailureCallback, + addrs: &mut VI, + (next_work_addrs, tmp_addrs): (&mut TranslateDataVec, &mut TranslateDataVec), + work_vecs: &mut (TranslateVec, TranslateDataVec), + wait_vecs: &mut (TranslateVec, TranslateDataVec), + ) where + VI: Iterator>, + B: SplitAtIndex, + { + let mut init_chunk = TranslationChunk::new(*self, FlagsType::NONE); + + let working_addr_count = work_vecs.1.capacity(); + + for (_, data) in (0..working_addr_count).zip(addrs) { + self.virt_addr_filter(spec, data, (&mut init_chunk, next_work_addrs), out_fail); + if init_chunk.next_max_addr_count(spec) >= working_addr_count as umem { + break; + } + } + + if init_chunk.addr_count > 0 { + init_chunk.split_chunk(spec, (next_work_addrs, tmp_addrs), work_vecs, wait_vecs); + } + } +} + +impl MmuTranslationBase for Address { + fn get_pt_by_virt_addr(&self, _: Address) -> Address { + *self + } + + fn get_pt_by_index(&self, idx: usize) -> (Address, usize) { + (*self, idx) + } + + fn pt_count(&self) -> usize { + 1 + } + + fn virt_addr_filter( + &self, + spec: &ArchMmuSpec, + addr: CTup3, + work_group: (&mut TranslationChunk, &mut TranslateDataVec), + out_fail: 
&mut VtopFailureCallback, + ) where + B: SplitAtIndex, + { + spec.virt_addr_filter(addr, work_group, out_fail); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/spec.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/spec.rs new file mode 100644 index 0000000..c5f94e3 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/spec.rs @@ -0,0 +1,716 @@ +use crate::architecture::Endianess; +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; +use crate::iter::FlowIters; +use crate::iter::SplitAtIndex; +use crate::mem::{PhysicalMemory, PhysicalReadData}; +use crate::types::{umem, Address, PageType, PhysicalAddress, UMEM_BITS}; +use cglue::tuple::*; + +use super::super::{VtopFailureCallback, VtopOutputCallback}; +use super::translate_data::{ + FlagsType, TranslateData, TranslateDataVec, TranslateVec, TranslationChunk, +}; +use super::ArchMmuDef; +use super::MmuTranslationBase; + +pub(crate) use fixed_slice_vec::FixedSliceVec as MVec; + +use std::convert::TryInto; + +#[cfg(feature = "trace_mmu")] +macro_rules! vtop_trace { + ( $( $x:expr ),* ) => { + log::trace!( $($x, )* ); + } +} + +#[cfg(not(feature = "trace_mmu"))] +macro_rules! 
vtop_trace { + ( $( $x:expr ),* ) => {}; +} + +const MAX_LEVELS: usize = 8; + +pub struct ArchMmuSpec { + pub def: ArchMmuDef, + pub pte_addr_masks: [umem; MAX_LEVELS], + pub virt_addr_bit_ranges: [(u8, u8); MAX_LEVELS], + pub virt_addr_masks: [umem; MAX_LEVELS], + pub virt_addr_page_masks: [umem; MAX_LEVELS], + pub valid_final_page_steps: [bool; MAX_LEVELS], + pub pt_leaf_size: [usize; MAX_LEVELS], + pub page_size_step: [umem; MAX_LEVELS], + pub spare_allocs: usize, +} + +impl From for ArchMmuSpec { + fn from(def: ArchMmuDef) -> Self { + Self::from_def(def) + } +} + +impl ArchMmuSpec { + pub const fn from_def(def: ArchMmuDef) -> Self { + let mut pte_addr_masks = [0; MAX_LEVELS]; + let mut virt_addr_bit_ranges = [(0, 0); MAX_LEVELS]; + let mut virt_addr_masks = [0; MAX_LEVELS]; + let mut virt_addr_page_masks = [0; MAX_LEVELS]; + let mut valid_final_page_steps = [false; MAX_LEVELS]; + let mut pt_leaf_size: [usize; MAX_LEVELS] = [0; MAX_LEVELS]; + let mut page_size_step: [umem; MAX_LEVELS] = [0; MAX_LEVELS]; + let spare_allocs = def.spare_allocs(); + + let mut i = 0; + while i < def.virtual_address_splits.len() { + let max = def.address_space_bits - 1; + let min = def.virtual_address_splits[i] + + if i == def.virtual_address_splits.len() - 1 { + 0 + } else { + def.pte_size.to_le().trailing_zeros() as u8 + }; + let mask = Address::bit_mask_u8(min..=max); + pte_addr_masks[i] = mask.to_umem(); + + pt_leaf_size[i] = def.pt_leaf_size(i); + page_size_step[i] = def.page_size_step_unchecked(i); + + let (min, max) = def.virt_addr_bit_range(i); + virt_addr_bit_ranges[i] = (min, max); + virt_addr_masks[i] = Address::bit_mask_u8(0..=max - min - 1).to_umem(); + virt_addr_page_masks[i] = Address::bit_mask_u8(0..=max - 1).to_umem(); + + i += 1; + } + + i = 0; + while i < def.valid_final_page_steps.len() { + valid_final_page_steps[def.valid_final_page_steps[i]] = true; + i += 1; + } + + Self { + def, + pte_addr_masks, + virt_addr_bit_ranges, + virt_addr_masks, + 
virt_addr_page_masks, + valid_final_page_steps, + pt_leaf_size, + page_size_step, + spare_allocs, + } + } + + pub fn pte_addr_mask(&self, pte_addr: Address, step: usize) -> umem { + pte_addr.to_umem() & umem::from_le(self.pte_addr_masks[step]) + } + + /// Filter out the input virtual address range to be in bounds + /// + /// + /// # Arguments + /// + /// * `(addr, buf)` - an address and buffer pair that gets split and filtered + /// * `valid_out` - output collection that contains valid splits + /// * `fail_out` - the final collection where the function will push rejected ranges to + /// + /// # Remarks + /// + /// This function cuts the input virtual address to be inside range `(-2^address_space_bits; + /// +2^address_space_bits)`. It may result in 2 ranges, and it may have up to 2 failed ranges + pub(crate) fn virt_addr_filter( + &self, + CTup3(addr, meta_addr, buf): CTup3, + (chunks, addrs_out): (&mut TranslationChunk, &mut TranslateDataVec), + fail_out: &mut VtopFailureCallback, + ) where + B: SplitAtIndex, + C: MmuTranslationBase, + { + vtop_trace!("total {:x}+{:x}", addr, buf.length()); + let tr_data = TranslateData { + addr, + meta_addr, + buf, + }; + + // Trim to virt address space limit + let (left, reject) = tr_data + .split_inclusive_at(Address::bit_mask(0..=(self.def.addr_size * 8 - 1)).to_umem()); + let left = left.unwrap(); + + if let Some(data) = reject { + // TODO: handle condition + let _ = fail_out.call(( + Error(ErrorOrigin::Mmu, ErrorKind::OutOfMemoryRange), + CTup3(data.addr, data.meta_addr, data.buf), + )); + } + + let virt_bit_range = self.virt_addr_bit_ranges[0].1; + let virt_range: umem = 1 << (virt_bit_range - 1); + vtop_trace!("vbr {:x} | {:x}", virt_bit_range, virt_range); + let arch_bit_range: umem = (!0) >> (UMEM_BITS - self.def.addr_size * 8); + + let (lower, higher) = left.split_at_address(virt_range.into()); + + if let Some(data) = higher { + let (reject, higher) = + 
data.split_at_address_rev((arch_bit_range.wrapping_sub(virt_range)).into()); + + if let Some(data) = reject { + // TODO: handle condition + let _ = fail_out.call(( + Error(ErrorOrigin::Mmu, ErrorKind::OutOfMemoryRange), + CTup3(data.addr, data.meta_addr, data.buf), + )); + } + + if let Some(higher) = higher { + // The upper half has to be all negative (all bits set), so compare the masks + // to see if it is the case. + let lhs = + Address::bit_mask(virt_bit_range..=(self.def.addr_size * 8 - 1)).to_umem(); + let rhs = higher.addr.to_umem() & lhs; + + if (lhs ^ rhs) == 0 { + vtop_trace!("higher {:x}+{:x}", higher.addr, higher.length()); + chunks.push_data(higher, addrs_out); + } else { + // TODO: handle condition + let _ = fail_out.call(( + Error(ErrorOrigin::Mmu, ErrorKind::OutOfMemoryRange), + CTup3(higher.addr, higher.meta_addr, higher.buf), + )); + } + } + } + + if let Some(lower) = lower { + vtop_trace!("lower {:x}+{:x}", lower.addr, lower.length()); + chunks.push_data(lower, addrs_out); + } + } + + #[allow(unused)] + pub fn split_count(&self) -> usize { + self.def.virtual_address_splits.len() + } + + pub fn pt_leaf_size(&self, step: usize) -> usize { + self.pt_leaf_size[step] + } + + /// Perform a virtual translation step, returning the next PTE address to read + /// + /// # Arguments + /// + /// * `pte_addr` - input PTE address that was read the last time (or DTB) + /// * `virt_addr` - virtual address we are translating + /// * `step` - the current step in the page walk + pub fn vtop_step(&self, pte_addr: Address, virt_addr: Address, step: usize) -> Address { + Address::from( + self.pte_addr_mask(pte_addr, step) | self.virt_addr_to_pte_offset(virt_addr, step), + ) + } + + pub fn virt_addr_to_pte_offset(&self, virt_addr: Address, step: usize) -> umem { + umem::from_le( + (virt_addr.to_umem().to_le() >> self.virt_addr_bit_ranges[step].0) + & self.virt_addr_masks[step], + ) * self.def.pte_size as umem + } + + pub fn virt_addr_to_page_offset(&self, virt_addr: 
Address, step: usize) -> umem { + virt_addr.to_umem() & umem::from_le(self.virt_addr_page_masks[step]) + } + + /// Get the page size of a specific step without checking if such page could exist + /// + /// # Arguments + /// + /// * `step` - the current step in the page walk + pub fn page_size_step_unchecked(&self, step: usize) -> umem { + self.page_size_step[step] + } + + /// Get the page size of a specific page walk step + /// + /// This function is preferable to use externally, because in debug builds it will check if such + /// page could exist, and if can not, it will panic + /// + /// # Arguments + /// + /// * `step` - the current step in the page walk + pub fn page_size_step(&self, step: usize) -> umem { + debug_assert!(self.valid_final_page_steps[step]); + self.page_size_step_unchecked(step) + } + + /// Get the page size of a specific mapping level + /// + /// This function is the same as `page_size_step`, but the `level` almost gets inverted. It + /// goes in line with x86 page level naming. With 1 being the 4kb page, and higher meaning + /// larger page. + /// + /// # Arguments + /// + /// * `level` - page mapping level to get the size of (1 meaning the smallest page) + pub fn page_size_level(&self, level: usize) -> umem { + self.page_size_step(self.def.virtual_address_splits.len() - level) + } + + /// Get the final physical page + /// + /// This performs the final step of a successful translation - retrieve the final physical + /// address. It does not perform any present checks, and assumes `pte_addr` points to a valid + /// page. 
+ /// + /// # Arguments + /// + /// * `pte_addr` - the address inside the previously read PTE + /// * `virt_addr` - the virtual address we are currently translating + /// * `step` - the current step in the page walk + pub fn get_phys_page( + &self, + pte_addr: Address, + virt_addr: Address, + step: usize, + prev_flags: FlagsType, + ) -> PhysicalAddress { + let phys_addr = Address::from( + self.pte_addr_mask(pte_addr, step) | self.virt_addr_to_page_offset(virt_addr, step), + ); + + PhysicalAddress::with_page( + phys_addr, + PageType::default() + .write((self.def.writeable_bit)( + pte_addr, + prev_flags.contains(FlagsType::WRITEABLE), + )) + .noexec((self.def.nx_bit)( + pte_addr, + prev_flags.contains(FlagsType::NX), + )), + self.page_size_step(step), + ) + } + + /// Check if the current page table entry is valid + /// + /// # Arguments + /// + /// * `pte_addr` - current page table entry + /// * `step` - the current step in the page walk + pub fn check_entry(&self, pte_addr: Address, step: usize) -> bool { + step == 0 || (self.def.present_bit)(pte_addr) + } + + /// Check if the current page table entry contains a physical page + /// + /// This will check `valid_final_page_steps` to determine if the PTE could have a large page, + /// and then check the large page bit for confirmation. It will always return true on the final + /// mapping regarding of the values in `valid_final_page_steps`. The `valid_final_page_steps` + /// list has to be sorted for the function to work properly, because it uses binary search. 
+ /// + /// # Arguments + /// + /// * `pte_addr` - current page table entry + /// * `step` - the current step the page walk + pub fn is_final_mapping(&self, pte_addr: Address, step: usize) -> bool { + (step == self.def.virtual_address_splits.len() - 1) + || ((self.def.large_page_bit)(pte_addr) && self.valid_final_page_steps[step]) + } + + /// This function will do a virtual to physical memory translation for the `ArchMmuSpec` in + /// `MmuTranslationBase` scope, over multiple elements. + pub(crate) fn virt_to_phys_iter( + &self, + mem: &mut T, + dtb: D, + addrs: VI, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + slice: &mut [std::mem::MaybeUninit], + ) where + T: PhysicalMemory + ?Sized, + B: SplitAtIndex, + D: MmuTranslationBase, + VI: Iterator>, + { + vtop_trace!("virt_to_phys_iter_with_mmu"); + + let mut addrs = addrs.double_peekable(); + + // We need to calculate in advance how we are going to split the allocated buffer. + // There is one important parameter `elem_count`, which determines + // how many chunks and addresses we allow in the working stack. + // + // Waiting stacks are much larger, because if working stack is full from the start, + // and it needs to be split to smaller chunks, we need space for them. In addition, + // we need to reserve enough space for several more splits like that, because + // the same scenario can occur for every single page mapping level. 
+ let chunk_size = std::mem::size_of::>(); + let data_size = std::mem::size_of::>(); + let prd_size = std::mem::size_of::(); + let pte_size = self.def.pte_size; + let spare_allocs = self.spare_allocs; + + let total_chunks_mul = 1 + spare_allocs; + let working_stack_count = 2; + let total_addr_mul = spare_allocs; + + let size_per_elem = (total_chunks_mul + working_stack_count) * chunk_size + + pte_size + + prd_size + // The +1 is for tmp_addrs + + (total_addr_mul + working_stack_count + 1) * data_size; + + let (elem_count, waiting_chunks, waiting_addr_count) = { + // 2 * 8 are extra bytes for alignment in read funcs + let elem_count = (slice.len() - 2 * 8) / size_per_elem; + let waiting_chunks = elem_count * (1 + spare_allocs); + let waiting_addr_count = elem_count * spare_allocs; + + // We need to support at least the number of addresses virt_addr_filter is going to split + // us into. It is a tough one, but 2 is the bare minimum for x86 + if elem_count == 0 { + // This is for the case of single element translation + if !addrs.is_next_last() + || addrs + .double_peek() + .0 + .as_ref() + .map(|e| e.length()) + .unwrap_or(0) + > 1 + { + log::trace!( + "input buffer is too small! Stability not guaranteed! ({:x})", + slice.len() + ); + } + (1, 1, 1) + } else if elem_count < 3 { + log::trace!( + "input buffer may be too small! 
({:x} {:x})", + elem_count, + slice.len() + ); + (elem_count, waiting_chunks, waiting_addr_count) + } else { + (elem_count, waiting_chunks, waiting_addr_count) + } + }; + + vtop_trace!( + "elem_count = {:x}; waiting_chunks = {:x};", + elem_count, + waiting_chunks + ); + + // Allocate buffers + let (working_bytes, slice) = slice.split_at_mut(elem_count * chunk_size); + let working_stack = MVec::from_uninit_bytes(working_bytes); + let (working_bytes, slice) = slice.split_at_mut(elem_count * chunk_size); + let working_stack2 = MVec::from_uninit_bytes(working_bytes); + let (waiting_bytes, slice) = slice.split_at_mut(waiting_chunks * chunk_size); + let waiting_stack = MVec::from_uninit_bytes(waiting_bytes); + + let (working_addrs_bytes, slice) = slice.split_at_mut(elem_count * data_size); + let working_addrs = MVec::from_uninit_bytes(working_addrs_bytes); + let (working_addrs_bytes, slice) = slice.split_at_mut(elem_count * data_size); + let mut working_addrs2 = MVec::from_uninit_bytes(working_addrs_bytes); + let (waiting_addrs_bytes, slice) = slice.split_at_mut(waiting_addr_count * data_size); + let waiting_addrs = MVec::from_uninit_bytes(waiting_addrs_bytes); + let (tmp_addrs_bytes, slice) = slice.split_at_mut(elem_count * data_size); + let mut tmp_addrs = MVec::from_uninit_bytes(tmp_addrs_bytes); + + let mut working_pair = (working_stack, working_addrs); + let mut waiting_pair = (waiting_stack, waiting_addrs); + + // Fill up working_pair and waiting_pair from the iterator + dtb.fill_init_chunk( + self, + out_fail, + &mut addrs, + (&mut working_addrs2, &mut tmp_addrs), + &mut working_pair, + &mut waiting_pair, + ); + + let mut next_working_pair = (working_stack2, working_addrs2); + + // Set up endianess translation functions + let buf_to_addr: fn(&[u8]) -> Address = match (self.def.endianess, self.def.pte_size) { + (Endianess::LittleEndian, 8) => { + |buf| Address::from(u64::from_le_bytes(buf.try_into().unwrap())) + } + (Endianess::LittleEndian, 4) => { + |buf| 
Address::from(u32::from_le_bytes(buf.try_into().unwrap())) + } + (Endianess::BigEndian, 8) => { + |buf| Address::from(u64::from_be_bytes(buf.try_into().unwrap())) + } + (Endianess::BigEndian, 4) => { + |buf| Address::from(u32::from_be_bytes(buf.try_into().unwrap())) + } + _ => |_| Address::NULL, + }; + + // see work_through_stack for usage + let mut prev_pt_address = [(Address::NULL, Address::NULL); MAX_LEVELS]; + + while !working_pair.0.is_empty() { + // Perform the reads here + if let Err(err) = + self.read_pt_address_iter(mem, &mut working_pair.0, slice, buf_to_addr) + { + vtop_trace!("read_pt_address_iter failure: {}", err); + + while let Some(data) = working_pair.1.pop() { + if !out_fail.call((err, CTup3(data.addr, data.meta_addr, data.buf))) { + return; + } + } + + while let Some(data) = waiting_pair.1.pop() { + if !out_fail.call((err, CTup3(data.addr, data.meta_addr, data.buf))) { + return; + } + } + + return; + } + + // Check read results, mark entries for lower levels, etc. etc. + self.work_through_stack( + &mut working_pair, + &mut next_working_pair, + out, + out_fail, + &mut waiting_pair, + &mut tmp_addrs, + &mut prev_pt_address, + ); + + debug_assert!(working_pair.1.is_empty()); + + // next_working_stack would get filled up if there were any splits going. + // Even if it is not fully filled up, it might not worth going through the + // trouble, because additional checks would negatively impact single element + // translations. (TODO: use some bool flag?). + // Instead, just swap the pairs, that is the fastest way to go. 
+ if next_working_pair.0.is_empty() { + self.refill_stack( + dtb, + &mut working_pair, + &mut next_working_pair, + out_fail, + &mut addrs, + &mut waiting_pair, + &mut tmp_addrs, + ); + } else { + std::mem::swap(&mut working_pair, &mut next_working_pair); + } + } + + debug_assert!(waiting_pair.0.is_empty()); + debug_assert!(working_pair.0.is_empty()); + debug_assert!(next_working_pair.0.is_empty()); + } + + fn read_pt_address_iter( + &self, + mem: &mut T, + chunks: &mut TranslateVec, + slice: &mut [std::mem::MaybeUninit], + buf_to_addr: fn(&[u8]) -> Address, + ) -> Result<()> + where + T: PhysicalMemory + ?Sized, + { + let pte_size = self.def.pte_size; + + // Create temporary read bufs. + // We need extra bytes for alignment + let (pt_buf_bytes, slice) = slice.split_at_mut(chunks.len() * pte_size + 8); + let mut pt_buf = MVec::from_uninit_bytes(pt_buf_bytes); + let (pt_read_bytes, _slice) = + slice.split_at_mut(chunks.len() * std::mem::size_of::() + 8); + let mut pt_read = MVec::from_uninit_bytes(pt_read_bytes); + + pt_buf.extend((0..).map(|_| 0).take(pte_size * chunks.len())); + + for (chunk, tr_chunk) in pt_buf.chunks_exact_mut(pte_size).zip(chunks.iter()) { + pt_read.push(CTup3( + PhysicalAddress::with_page( + tr_chunk.pt_addr, + PageType::PAGE_TABLE, + self.pt_leaf_size(tr_chunk.step) as umem, + ), + Address::NULL, + chunk.into(), + )); + } + + let mut pt_iter = pt_read + .iter_mut() + .map(|CTup3(a, b, d): &mut PhysicalReadData| CTup3(*a, *b, d.into())); + + mem.phys_read_raw_iter((&mut pt_iter).into())?; + + // Move the read value into the chunk + for (ref mut chunk, CTup3(_, _, buf)) in chunks.iter_mut().zip(pt_read.iter()) { + let pt_addr = buf_to_addr(buf); + chunk.pt_addr = pt_addr; + // We assume the flags may either always inherit or never inherit. + // Thus, if there is a more insane architecture, that has it mixed, + // then open an issue report! 
+ chunk.update_flags(&self.def); + } + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + fn refill_stack( + &self, + dtb: D, + working_pair: &mut (TranslateVec, TranslateDataVec), + next_working_pair: &mut (TranslateVec, TranslateDataVec), + out_fail: &mut VtopFailureCallback, + addrs: &mut VI, + waiting_pair: &mut (TranslateVec, TranslateDataVec), + tmp_addrs: &mut TranslateDataVec, + ) where + D: MmuTranslationBase, + VI: Iterator>, + { + // If there is a waiting stack, use it + if !waiting_pair.0.is_empty() { + let (working_stack, working_addrs) = working_pair; + let (waiting_stack, waiting_addrs) = waiting_pair; + + while let Some(mut chunk) = waiting_stack.pop() { + // Make sure working stack does not overflow + if working_stack.len() >= working_stack.capacity() + || working_addrs.len() >= working_addrs.capacity() + || (working_addrs.len() + chunk.addr_count > working_stack.capacity() + && !working_stack.is_empty()) + { + waiting_stack.push(chunk); + break; + } else { + // Move addresses between the stacks, and only until we fill up the + // address stack. 
+ let mut new_chunk = TranslationChunk::new(chunk.pt_addr, chunk.prev_flags); + new_chunk.step = chunk.step; + for _ in + (0..chunk.addr_count).zip(working_addrs.len()..working_addrs.capacity()) + { + let addr = chunk.pop_data(waiting_addrs).unwrap(); + new_chunk.push_data(addr, working_addrs); + } + + if chunk.addr_count > 0 { + waiting_stack.push(chunk); + } + + working_stack.push(new_chunk); + } + } + } else { + dtb.fill_init_chunk( + self, + out_fail, + addrs, + (&mut next_working_pair.1, tmp_addrs), + working_pair, + waiting_pair, + ); + } + } + + #[inline(never)] + #[allow(clippy::too_many_arguments)] + fn work_through_stack( + &self, + (working_stack, working_addrs): &mut (TranslateVec, TranslateDataVec), + next_working_pair: &mut (TranslateVec, TranslateDataVec), + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + waiting_pair: &mut (TranslateVec, TranslateDataVec), + tmp_addrs: &mut TranslateDataVec, + prev_pt_address: &mut [(Address, Address)], + ) { + while let Some(mut chunk) = working_stack.pop() { + vtop_trace!("chunk = {:x} {:x}", chunk.step, chunk.pt_addr); + + // This is extremely important! + // It is a something of a heuristic against + // page tables that have all entries set to the same page table. + // + // For instance, windows has such global page tables, it is actually + // just 2-3 page tables, starting from level 4 which go down one at + // a time, covering an insane region, but not actually pointing anywhere. + // + // Page map chokes on these scenarios, and once again - it's page tables + // that point nowhere! So we just try and ignore them. + // + // Some cases this _may_ cause issues, but it's extremely rare to have + // 2 identical pages right next to each other. If there is ever a documented + // case however, then we will need to workaround that. 
+ let prev_address = prev_pt_address[chunk.step]; + let cur_addr = ( + chunk.pt_addr, + chunk + .min_addr + .as_mem_aligned(self.page_size_step_unchecked(chunk.step + 1)), + ); + prev_pt_address[chunk.step] = cur_addr; + + chunk.step += 1; + + if !self.check_entry(chunk.pt_addr, chunk.step + 1) + || (cur_addr.0 == prev_address.0 && cur_addr.1 != prev_address.1) + { + // Failure + while let Some(entry) = chunk.pop_data(working_addrs) { + // TODO: handle condition.. + let _ = out_fail.call(( + Error(ErrorOrigin::Mmu, ErrorKind::OutOfMemoryRange), + CTup3(entry.addr, entry.meta_addr, entry.buf), + )); + } + } else if self.is_final_mapping(chunk.pt_addr, chunk.step) { + // Success! + let pt_addr = chunk.pt_addr; + let step = chunk.step; + let prev_flags = chunk.prev_flags; + while let Some(entry) = chunk.pop_data(working_addrs) { + // TODO: handle condition.. + let _ = out.call(CTup3( + self.get_phys_page(pt_addr, entry.addr, step, prev_flags), + entry.meta_addr, + entry.buf, + )); + } + } else { + // We still need to continue the page walk. + // Split the chunk up into the waiting queue + chunk.split_chunk( + self, + (working_addrs, tmp_addrs), + next_working_pair, + waiting_pair, + ); + + debug_assert!(tmp_addrs.is_empty()); + } + } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/translate_data.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/translate_data.rs new file mode 100644 index 0000000..b99075e --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mmu/translate_data.rs @@ -0,0 +1,342 @@ +use crate::iter::SplitAtIndex; +use crate::types::{umem, Address}; + +use super::{ArchMmuDef, ArchMmuSpec, MmuTranslationBase}; + +use std::cmp::Ordering; + +use super::MVec; + +pub type TranslateVec<'a> = MVec<'a, TranslationChunk
>; +pub type TranslateDataVec<'a, T> = MVec<'a, TranslateData>; + +unsafe fn shorten_datavec_lifetime<'a: 'b, 'b, O>( + r: &'b mut TranslateDataVec<'a, O>, +) -> &'b mut TranslateDataVec<'b, O> { + std::mem::transmute(r) +} + +unsafe fn shorten_pair_lifetime<'a: 't, 'b: 't, 't, O>( + r: &'t mut (TranslateVec<'a>, TranslateDataVec<'b, O>), +) -> &'t mut (TranslateVec<'t>, TranslateDataVec<'t, O>) { + std::mem::transmute(r) +} + +#[derive(Debug)] +pub struct TranslateData { + pub addr: Address, + pub meta_addr: Address, + pub buf: T, +} + +impl TranslateData { + pub fn split_at_address(self, addr: Address) -> (Option, Option) { + let sub = self.addr.to_umem(); + self.split_at(addr.to_umem().saturating_sub(sub)) + } + + pub fn split_at_address_rev(self, addr: Address) -> (Option, Option) { + let base = self.addr + self.length(); + self.split_at_rev(base.to_umem().saturating_sub(addr.to_umem())) + } +} + +impl Ord for TranslateData { + fn cmp(&self, other: &Self) -> Ordering { + self.addr.cmp(&other.addr) + } +} + +impl Eq for TranslateData {} + +impl PartialOrd for TranslateData { + fn partial_cmp(&self, other: &Self) -> Option { + self.addr.partial_cmp(&other.addr) + } +} + +impl PartialEq for TranslateData { + fn eq(&self, other: &Self) -> bool { + self.addr == other.addr + } +} + +impl SplitAtIndex for TranslateData { + fn split_at(self, idx: umem) -> (Option, Option) + where + Self: Sized, + { + let addr = self.addr; + let meta_addr = self.meta_addr; + let (bleft, bright) = self.buf.split_at(idx); + + ( + bleft.map(|buf| TranslateData { + addr, + meta_addr, + buf, + }), + bright.map(|buf| TranslateData { + buf, + addr: addr + idx, + meta_addr: meta_addr + idx, + }), + ) + } + + unsafe fn split_at_mut(&mut self, idx: umem) -> (Option, Option) + where + Self: Sized, + { + let addr = self.addr; + let meta_addr = self.meta_addr; + let (bleft, bright) = self.buf.split_at_mut(idx); + + ( + bleft.map(|buf| TranslateData { + addr, + meta_addr, + buf, + }), + 
bright.map(|buf| TranslateData { + buf, + addr: addr + idx, + meta_addr: meta_addr + idx, + }), + ) + } + + fn length(&self) -> umem { + self.buf.length() + } + + fn size_hint(&self) -> usize { + self.buf.size_hint() + } +} + +bitflags! { + #[repr(transparent)] + #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] + #[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] + pub struct FlagsType: u8 { + const NONE = 0b00; + // Maps MMUDef's writeable_bit + const WRITEABLE = 0b01; + // Maps MMUDef's nx_bit + const NX = 0b10; + } +} + +/// Abstracts away a list of TranslateData in a splittable manner +#[derive(Debug)] +pub struct TranslationChunk { + pub pt_addr: T, + pub addr_count: usize, + pub min_addr: Address, + max_addr: Address, + pub step: usize, + pub prev_flags: FlagsType, +} + +impl FlagsType { + pub fn nx(mut self, flag: bool) -> Self { + self &= !(FlagsType::NX); + if flag { + self | FlagsType::NX + } else { + self + } + } + + pub fn writeable(mut self, flag: bool) -> Self { + self &= !(FlagsType::WRITEABLE); + if flag { + self | FlagsType::WRITEABLE + } else { + self + } + } +} + +impl TranslationChunk
{ + pub fn update_flags(&mut self, mmu_def: &ArchMmuDef) { + self.prev_flags = FlagsType::NONE + .writeable((mmu_def.writeable_bit)( + self.pt_addr, + self.prev_flags.contains(FlagsType::WRITEABLE), + )) + .nx((mmu_def.nx_bit)( + self.pt_addr, + self.prev_flags.contains(FlagsType::NX), + )); + } +} + +impl TranslationChunk { + pub fn new(pt_addr: T, prev_flags: FlagsType) -> Self { + let (min, max) = (!0u64, 0u64); + Self::with_minmax(pt_addr, prev_flags, min.into(), max.into()) + } + + pub fn with_minmax( + pt_addr: T, + prev_flags: FlagsType, + min_addr: Address, + max_addr: Address, + ) -> Self { + Self { + pt_addr, + addr_count: 0, + step: 0, + min_addr, + max_addr, + prev_flags, + } + } +} + +impl TranslationChunk { + /// Pushes data to stack updating min/max bounds + pub fn push_data( + &mut self, + data: TranslateData, + stack: &mut TranslateDataVec, + ) { + self.min_addr = std::cmp::min(self.min_addr, data.addr); + self.max_addr = std::cmp::max(self.max_addr, data.addr + data.length()); + self.addr_count += 1; + stack.push(data); + } + + /// Pops the address from stack without modifying bounds + pub fn pop_data( + &mut self, + stack: &mut TranslateDataVec, + ) -> Option> { + if self.addr_count > 0 { + self.addr_count -= 1; + stack.pop() + } else { + None + } + } + + pub fn next_max_addr_count(&self, spec: &ArchMmuSpec) -> umem { + let step_size = spec.page_size_step_unchecked(self.step + 1); + + let addr_diff = self.max_addr.wrapping_sub(self.min_addr).to_umem(); + let add = (addr_diff % step_size != 0) as umem; + + self.addr_count as umem * (addr_diff / step_size + add) + } + + /// Splits the chunk into multiple smaller ones for the next VTOP step. 
+ pub fn split_chunk( + mut self, + spec: &ArchMmuSpec, + (addr_stack, tmp_addr_stack): (&mut TranslateDataVec, &mut TranslateDataVec), + out_target: &mut (TranslateVec, TranslateDataVec), + wait_target: &mut (TranslateVec, TranslateDataVec), + ) { + // Safety: + // We ideally would not do this, but honestly this is a better alternative + // to lifetime torture. + // The input vecs are allocated by the same functions, and the data that's being held + // should not really be lifetime dependent in the context of VTOP + let mut addr_stack = unsafe { shorten_datavec_lifetime(addr_stack) }; + let mut tmp_addr_stack = unsafe { shorten_datavec_lifetime(tmp_addr_stack) }; + let mut out_target = unsafe { shorten_pair_lifetime(out_target) }; + let mut wait_target = unsafe { shorten_pair_lifetime(wait_target) }; + + let align_as = spec.page_size_step_unchecked(self.step); + let step_size = spec.page_size_step_unchecked(self.step + 1); + + //TODO: mask out the addresses to limit them within address space + //this is in particular for the first step where addresses are split between positive and + //negative sides + let upper = (self.max_addr - 1usize).as_mem_aligned(step_size).to_umem(); + let lower = self.min_addr.as_mem_aligned(step_size).to_umem(); + + let mut cur_max_addr: umem = !0; + + // Walk in reverse so that lowest addresses always end up + // first in the stack. This preserves translation order + for (cnt, addr) in (0..=((upper - lower) / step_size)) + .map(|i| upper - i * step_size) + .enumerate() + { + if addr > cur_max_addr { + continue; + } + + cur_max_addr = 0; + + // Also, we need to push the upper elements to the waiting stack preemptively... 
+ // This might result in slight performance loss, but keeps the order + let remaining = (addr - lower) / step_size + 1; + + let (chunks_out, addrs_out) = if out_target.0.capacity() as umem + >= out_target.0.len() as umem + remaining + && out_target.1.capacity() as umem + >= out_target.1.len() as umem + self.addr_count as umem * remaining + { + &mut out_target + } else { + &mut wait_target + }; + + let addr = Address::from(addr); + let addr_aligned = addr.as_mem_aligned(align_as); + let index = (addr - addr_aligned) as umem / step_size; + let (pt_addr, _) = self.pt_addr.get_pt_by_index(index as usize); + let pt_addr = spec.vtop_step(pt_addr, addr, self.step); + + let mut new_chunk = TranslationChunk::new(pt_addr, self.prev_flags); + + // Go through each address and check it individually + for _ in 0..self.addr_count { + let data = self.pop_data(addr_stack).unwrap(); + + debug_assert!( + data.addr >= self.min_addr, + "__ {} {:x}+{:x} | {:#?}", + cnt, + data.addr, + data.length(), + &self + ); + debug_assert!( + data.addr + data.length() <= self.max_addr, + "{} {:x}+{:x} | {:#?}", + cnt, + data.addr, + data.length(), + &self + ); + + let (left, right) = data.split_at_address(addr); + + if let Some(data) = right { + new_chunk.push_data(data, addrs_out); + } + + // There was some leftover data + if let Some(data) = left { + cur_max_addr = + std::cmp::max((data.addr + data.length()).to_umem(), cur_max_addr); + self.push_data(data, tmp_addr_stack); + } + } + + if new_chunk.addr_count > 0 { + new_chunk.step = self.step; + chunks_out.push(new_chunk); + } + + std::mem::swap(&mut addr_stack, &mut tmp_addr_stack); + } + + debug_assert!(self.addr_count == 0); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mod.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mod.rs new file mode 100644 index 0000000..bff02fc --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/mod.rs @@ -0,0 +1,951 @@ +//! Virtual address translation +//! +//! 
This module describes virtual to physical address translation interfaces. +//! +//! * [VirtualTranslate](VirtualTranslate) - user facing trait providing a way to translate +//! addresses. +//! +//! * [VirtualTranslate2](VirtualTranslate2) - internally used trait that translates pairs of +//! buffers and virtual addresses into pairs of buffers and their corresponding physical addresses. +//! Is used to provide [virtual memory view](crate::mem::virt_mem::virtual_dma). This trait is also +//! a [point of caching](crate::mem::virt_translate::cache) for the translations. +//! +//! * [VirtualTranslate3](VirtualTranslate3) - a sub-scope that translates addresses of a single +//! address space. Objects that implement VirtualTranslate3 are designed to be cheap to construct, +//! because they use pooled resources from VirtualTranslate2 objects. This is equivalent to storing +//! a single VirtualTranslate2 state for the OS, while constructing VirtualTranslate3 instances for +//! each process. This is precisely what is being done in our Win32 OS (see +//! [here](https://github.com/memflow/memflow-win32/blob/791bb7afb8a984034dde314c136b7675b44e3abf/src/win32/process.rs#L348), +//! and +//! [here](https://github.com/memflow/memflow-win32/blob/791bb7afb8a984034dde314c136b7675b44e3abf/src/win32/process.rs#L314)). +//! +//! Below figure shows entire pipeline of a virtual address translating object with caching. +//! +//! ```text +//! +--------------------------+ +//! | (Win32Process) | +//! | VirtualTranslate | (Contains VT2+VT3+Phys) +//! | MemoryView | +//! +--------------------------+ +//! | +//! | +//! +-----------+--------------+ +//! | (CachedVirtualTranslate) | (Accepts VT3+Phys) +//! | VirtualTranslate2 | (Point of caching) +//! +--------------------------+ +//! | +//! | +//! +--------+----------+ +//! | (DirectTranslate) | (Accepts VT3+Phys) +//! | VirtualTranslate2 | (Contains 64MB buffer) +//! +-------------------+ +//! | +//! | +//! +----------+-------------+ +//! 
| (X86 VirtualTranslate) | (Accepts 64MB buffer+Phys) +//! | VirtualTranslate3 | (Contains CR3+ArchMmuSpec) +//! +------------------------+ +//! | +//! | +//! +------+------+ +//! | ArchMmuSpec | (Accepts translation root (CR3), buffer, Phys) +//! +-------------+ (Contains architecture specification) +//! | +//! | +//! +-------+--------+ +//! | PhysicalMemory | (Accepts special page flags) +//! +----------------+ +//! | +//! | +//! ... (Further nesting) +//! ``` + +use std::prelude::v1::*; + +use super::{MemoryRange, MemoryRangeCallback, VtopRange}; + +use std::cmp::*; + +use cglue::prelude::v1::*; +use itertools::Itertools; + +pub mod direct_translate; +use crate::iter::SplitAtIndex; +pub use direct_translate::DirectTranslate; + +use crate::architecture::ArchitectureObj; +use crate::types::gap_remover::GapRemover; + +#[macro_use] +pub mod mmu; + +pub mod cache; + +pub use cache::*; + +#[cfg(test)] +mod tests; + +use crate::error::{Result, *}; + +use crate::mem::PhysicalMemory; +use crate::types::{imem, umem, Address, Page, PhysicalAddress}; + +/// Translates virtual addresses into physical ones. +/// +/// This is a simple user-facing trait to perform virtual address translations. Implementor needs +/// to implement only 1 function - [virt_to_phys_list](VirtualTranslate::virt_to_phys_list). Other +/// functions are provided as helpers built on top of the base function. +/// +/// For overview how this trait relates to other virtual translation traits, +/// check out documentation of [this module](self). +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +pub trait VirtualTranslate: Send { + /// Translate a list of address ranges into physical address space. + /// + /// This function will take addresses in `addrs` and produce translation of them into `out`. + /// Any unsuccessful ranges will be sent to `out_fail`. 
+ /// + /// # Remarks + /// + /// Note that the number of outputs may not match the number of inputs - virtual address space + /// does not usually map linearly to the physical one, thus the input may need to be split into + /// smaller parts, which may not be combined back together. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::DummyOs; + /// + /// // Virtual translation test + /// fn vtop(mem: &mut impl VirtualTranslate, addr: Address) { + /// let mut cnt = 0; + /// mem.virt_to_phys_list( + /// &[CTup2(addr, 0x2000)], + /// // Successfully translated + /// (&mut |_| { cnt += 1; true }).into(), + /// // Failed to translate + /// (&mut |v| panic!("Failed to translate: {:?}", v)).into() + /// ); + /// // We attempt to translate 2 pages, thus there are 2 outputs. + /// assert_eq!(2, cnt); + /// } + /// # let mut proc = DummyOs::quick_process(size::mb(2), &[]); + /// # let addr = proc.info().address; + /// # vtop(&mut proc.mem, addr); + /// ``` + fn virt_to_phys_list( + &mut self, + addrs: &[VtopRange], + out: VirtualTranslationCallback, + out_fail: VirtualTranslationFailCallback, + ); + + /// Translate a single virtual address range into physical address space. + /// + /// This function is a helper for [`virt_to_phys_list`](Self::virt_to_phys_list) that translates + /// just a single range, and has no failure output. It is otherwise identical. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::DummyOs; + /// + /// // Virtual translation test + /// fn vtop(mem: &mut impl VirtualTranslate, addr: Address) { + /// let mut cnt = 0; + /// mem.virt_to_phys_range( + /// addr, addr + 0x2000, + /// // Successfully translated + /// (&mut |_| { cnt += 1; true }).into(), + /// ); + /// // We attempt to translate 2 pages, thus there are 2 outputs. 
+ /// assert_eq!(2, cnt); + /// } + /// # let mut proc = DummyOs::quick_process(size::mb(2), &[]); + /// # let addr = proc.info().address; + /// # vtop(&mut proc.mem, addr); + /// ``` + fn virt_to_phys_range( + &mut self, + start: Address, + end: Address, + out: VirtualTranslationCallback, + ) { + assert!(end >= start); + self.virt_to_phys_list( + &[CTup2(start, (end - start) as umem)], + out, + (&mut |_| true).into(), + ) + } + + /// Translate a single virtual address range into physical address space and coalesce nearby + /// regions. + /// + /// This function is nearly identical to [`virt_to_phys_range`](Self::virt_to_phys_range), however, + /// it performs additional post-processing of the output to combine consecutive ranges, and + /// output them in sorted order (by input virtual address). + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::{DummyOs, DummyMemory}; + /// + /// // Create a dummy OS + /// let mem = DummyMemory::new(size::mb(1)); + /// let mut os = DummyOs::new(mem); + /// + /// // Create a process with 1+10 randomly placed regions + /// let pid = os.alloc_process(size::kb(4), &[]); + /// let proc = os.process_by_pid(pid).unwrap().proc; + /// os.process_alloc_random_mem(&proc, 10, 1); + /// let mut mem = os.process_by_pid(pid).unwrap().mem; + /// + /// // Translate entire address space + /// let mut output = vec![]; + /// + /// mem.virt_translation_map_range( + /// Address::null(), + /// Address::invalid(), + /// (&mut output).into() + /// ); + /// + /// // There should be 11 memory ranges. 
+ /// assert_eq!(11, output.len()); + /// ``` + fn virt_translation_map_range( + &mut self, + start: Address, + end: Address, + out: VirtualTranslationCallback, + ) { + let mut set = std::collections::BTreeSet::new(); + + self.virt_to_phys_range( + start, + end, + (&mut |v| { + set.insert(v); + true + }) + .into(), + ); + + set.into_iter() + .coalesce(|a, b| { + // TODO: Probably make the page size reflect the merge + if b.in_virtual == (a.in_virtual + a.size) + && b.out_physical.address() == (a.out_physical.address() + a.size) + && a.out_physical.page_type() == b.out_physical.page_type() + { + Ok(VirtualTranslation { + in_virtual: a.in_virtual, + size: a.size + b.size, + out_physical: a.out_physical, + }) + } else { + Err((a, b)) + } + }) + .feed_into(out); + } + + /// Retrieves mapped virtual pages in the specified range. + /// + /// In case a range from [`Address::null()`], [`Address::invalid()`] is specified + /// this function will return all mappings. + /// + /// Given negative gap size, they will not be removed. 
+ /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// # use memflow::architecture::x86::x64; + /// # let dummy_mem = DummyMemory::new(size::mb(16)); + /// # let mut dummy_os = DummyOs::new(dummy_mem); + /// # let (dtb, virt_base) = dummy_os.alloc_dtb(size::mb(2), &[]); + /// # let translator = x64::new_translator(dtb); + /// # let arch = x64::ARCH; + /// # let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); + /// println!("{:>16} {:>12} {:<}", "ADDR", "SIZE", "TYPE"); + /// + /// let callback = &mut |CTup3(addr, size, pagety)| { + /// println!("{:>16x} {:>12x} {: Result { + let mut out = Err(Error(ErrorOrigin::VirtualTranslate, ErrorKind::OutOfBounds)); + + self.virt_to_phys_list( + &[CTup2(address, 1)], + (&mut |VirtualTranslation { + in_virtual: _, + size: _, + out_physical, + }| { + out = Ok(out_physical); + false + }) + .into(), + (&mut |_| true).into(), + ); + + out + } + + /// Retrieve page information at virtual address. + /// + /// This function is equivalent to calling + /// [containing_page](crate::types::physical_address::PhysicalAddress::containing_page) on + /// [`virt_to_phys`](Self::virt_to_phys) result. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::DummyOs; + /// + /// // Virtual translation test + /// fn vtop(mem: &mut impl VirtualTranslate, addr: Address) { + /// let page = mem.virt_page_info(addr).unwrap(); + /// assert_eq!(page.page_size, mem::kb(4)); + /// assert_eq!(page.page_type, PageType::WRITEABLE); + /// } + /// # let mut proc = DummyOs::quick_process(size::mb(2), &[]); + /// # let addr = proc.info().address; + /// # vtop(&mut proc.mem, addr); + /// ``` + fn virt_page_info(&mut self, addr: Address) -> Result { + let paddr = self.virt_to_phys(addr)?; + Ok(paddr.containing_page()) + } + + /// Retrieve a vector of physical pages within given range. 
+ /// + /// This is equivalent to calling [`virt_page_map_range`](Self::virt_page_map_range) with a + /// vector output argument. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// # use memflow::architecture::x86::x64; + /// # let dummy_mem = DummyMemory::new(size::mb(16)); + /// # let mut dummy_os = DummyOs::new(dummy_mem); + /// # let (dtb, virt_base) = dummy_os.alloc_dtb(size::mb(2), &[]); + /// # let translator = x64::new_translator(dtb); + /// # let arch = x64::ARCH; + /// # let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); + /// println!("{:>16} {:>12} {:<}", "ADDR", "SIZE", "TYPE"); + /// + /// // display all mappings with a gap size of 0 + /// let out = virt_mem.virt_page_map_range_vec(0, Address::null(), Address::invalid()); + /// + /// assert!(out.len() > 0); + /// + /// for CTup3(addr, size, pagety) in out { + /// println!("{:>16x} {:>12x} {: Vec { + let mut out = vec![]; + self.virt_page_map_range(gap_size, start, end, (&mut out).into()); + out + } + + // page map helpers + + /// Get virtual translation map over entire address space. + /// + /// This is equivalent to [`virt_translation_map_range`](Self::virt_translation_map_range) with a + /// range from null to highest address. 
+ /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::{DummyOs, DummyMemory}; + /// + /// // Create a dummy OS + /// let mem = DummyMemory::new(size::mb(1)); + /// let mut os = DummyOs::new(mem); + /// + /// // Create a process with 1+10 randomly placed regions + /// let pid = os.alloc_process(size::kb(4), &[]); + /// let proc = os.process_by_pid(pid).unwrap().proc; + /// os.process_alloc_random_mem(&proc, 10, 1); + /// let mut mem = os.process_by_pid(pid).unwrap().mem; + /// + /// // Translate entire address space + /// let mut output = vec![]; + /// + /// mem.virt_translation_map((&mut output).into()); + /// + /// // There should be 11 memory ranges. + /// assert_eq!(11, output.len()); + /// ``` + fn virt_translation_map(&mut self, out: VirtualTranslationCallback) { + self.virt_translation_map_range(Address::null(), Address::invalid(), out) + } + + /// Get virtual translation map over entire address space and return it as a vector. + /// + /// This is a [`virt_translation_map`](Self::virt_translation_map) helper that stores results + /// into a vector that gets returned. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// use memflow::dummy::{DummyOs, DummyMemory}; + /// + /// // Create a dummy OS + /// let mem = DummyMemory::new(size::mb(1)); + /// let mut os = DummyOs::new(mem); + /// + /// // Create a process with 1+10 randomly placed regions + /// let pid = os.alloc_process(size::kb(4), &[]); + /// let proc = os.process_by_pid(pid).unwrap().proc; + /// os.process_alloc_random_mem(&proc, 10, 1); + /// let mut mem = os.process_by_pid(pid).unwrap().mem; + /// + /// // Translate entire address space + /// let output = mem.virt_translation_map_vec(); + /// + /// // There should be 11 memory ranges. 
+ /// assert_eq!(11, output.len());
+ /// ```
+ #[skip_func]
+ fn virt_translation_map_vec(&mut self) -> Vec {
+ let mut out = vec![];
+ self.virt_translation_map((&mut out).into());
+ out
+ }
+
+ /// Attempt to translate a physical address into a virtual one.
+ ///
+ /// This function is the reverse of [`virt_to_phys`](Self::virt_to_phys). Note that there
+ /// could be multiple virtual addresses for one physical address. If all candidates
+ /// are needed, use [`phys_to_virt_vec`](Self::phys_to_virt_vec) function.
+ ///
+ /// # Example:
+ ///
+ /// ```
+ /// use memflow::prelude::v1::*;
+ /// # use memflow::dummy::DummyOs;
+ ///
+ /// // Virtual translation and reversal test
+ /// fn vtop_ptov(mem: &mut impl VirtualTranslate, addr: Address) {
+ /// let paddr = mem.virt_to_phys(addr).unwrap();
+ /// let vaddr = mem.phys_to_virt(paddr.address());
+ /// assert_eq!(vaddr, Some(addr));
+ /// }
+ /// # let mut proc = DummyOs::quick_process(size::mb(2), &[]);
+ /// # let addr = proc.info().address;
+ /// # vtop_ptov(&mut proc.mem, addr);
+ /// ```
+ fn phys_to_virt(&mut self, phys: Address) -> Option
{ + let mut virt = None; + + let callback = &mut |VirtualTranslation { + in_virtual, + size: _, + out_physical, + }| { + if out_physical.address() == phys { + virt = Some(in_virtual); + false + } else { + true + } + }; + + self.virt_translation_map(callback.into()); + + virt + } + + /// Retrieve all virtual address that map into a given physical address. + /// + /// This function is the reverse of [`virt_to_phys`](Self::virt_to_phys), and it retrieves all + /// physical addresses that map to this virtual address. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::DummyOs; + /// + /// // Virtual translation and reversal test + /// fn vtop_ptov(mem: &mut impl VirtualTranslate, addr: Address) { + /// let paddr = mem.virt_to_phys(addr).unwrap(); + /// let vaddrs = mem.phys_to_virt_vec(paddr.address()); + /// assert_eq!(&vaddrs, &[addr]); + /// } + /// # let mut proc = DummyOs::quick_process(size::mb(2), &[]); + /// # let addr = proc.info().address; + /// # vtop_ptov(&mut proc.mem, addr); + /// ``` + #[skip_func] + fn phys_to_virt_vec(&mut self, phys: Address) -> Vec
{ + let mut virt = vec![]; + + let callback = &mut |VirtualTranslation { + in_virtual, + size: _, + out_physical, + }| { + if out_physical.address() == phys { + virt.push(in_virtual); + true + } else { + true + } + }; + + self.virt_translation_map(callback.into()); + + virt + } + + /// Retrieves all mapped virtual pages. + /// + /// The [`virt_page_map`](Self::virt_page_map) function is a convenience wrapper for calling + /// [`virt_page_map_range`](Self::virt_page_map_range)`(gap_size, Address::null(), Address::invalid(), out)`. + /// + /// # Example: + /// + /// ``` + /// use memflow::prelude::v1::*; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// # use memflow::architecture::x86::x64; + /// # let dummy_mem = DummyMemory::new(size::mb(16)); + /// # let mut dummy_os = DummyOs::new(dummy_mem); + /// # let (dtb, virt_base) = dummy_os.alloc_dtb(size::mb(2), &[]); + /// # let translator = x64::new_translator(dtb); + /// # let arch = x64::ARCH; + /// # let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); + /// println!("{:>16} {:>12} {:<}", "ADDR", "SIZE", "TYPE"); + /// + /// let callback = &mut |CTup3(addr, size, pagety)| { + /// println!("{:>16x} {:>12x} {:16} {:>12} {:<}", "ADDR", "SIZE", "TYPE"); + /// for CTup3(addr, size, pagety) in maps.iter() { + /// println!("{:>16x} {:>12x} {: Vec { + let mut out = vec![]; + self.virt_page_map(gap_size, (&mut out).into()); + out + } +} + +pub type VirtualTranslationCallback<'a> = OpaqueCallback<'a, VirtualTranslation>; +pub type VirtualTranslationFailCallback<'a> = OpaqueCallback<'a, VirtualTranslationFail>; + +/// Virtual page range information with physical mappings used for callbacks +#[repr(C)] +#[derive(Clone, Debug, Eq, Copy)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct VirtualTranslation { + pub in_virtual: Address, + pub size: umem, + pub out_physical: 
PhysicalAddress, +} + +impl Ord for VirtualTranslation { + fn cmp(&self, other: &Self) -> Ordering { + self.in_virtual.cmp(&other.in_virtual) + } +} + +impl PartialOrd for VirtualTranslation { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for VirtualTranslation { + fn eq(&self, other: &Self) -> bool { + self.in_virtual == other.in_virtual + } +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct VirtualTranslationFail { + pub from: Address, + pub size: umem, +} + +pub trait VirtualTranslate2 +where + Self: Send, +{ + /// Translate a list of virtual addresses + /// + /// This function will do a virtual to physical memory translation for the + /// `VirtualTranslate3` over multiple elements. + /// + /// In most cases, you will want to use the `VirtualDma`, but this trait is provided if needed + /// to implement some more advanced filtering. 
+ /// + /// # Examples + /// + /// ``` + /// # use memflow::error::Result; + /// # use memflow::types::{PhysicalAddress, Address, umem}; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// use memflow::mem::{VirtualTranslate2, DirectTranslate}; + /// use memflow::types::size; + /// use memflow::architecture::x86::x64; + /// use memflow::cglue::{FromExtend, CTup3}; + /// + /// use std::convert::TryInto; + /// + /// # const VIRT_MEM_SIZE: usize = size::mb(8) as usize; + /// # const CHUNK_SIZE: usize = 2; + /// # + /// # let mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(mem); + /// # let (dtb, virtual_base) = os.alloc_dtb(VIRT_MEM_SIZE, &[]); + /// # let mut mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let arch = x64::ARCH; + /// + /// let mut buffer = vec![0; VIRT_MEM_SIZE * CHUNK_SIZE / arch.page_size()]; + /// let buffer_length = buffer.len(); + /// + /// // In this example, 8 megabytes starting from `virtual_base` are mapped in. + /// // We translate 2 bytes chunks over the page boundaries. These bytes will be + /// // split off into 2 separate translated chunks. 
+ /// let addresses = buffer + /// .chunks_mut(CHUNK_SIZE) + /// .enumerate() + /// .map(|(i, buf)| CTup3(virtual_base + ((i + 1) * size::kb(4) - 1), Address::NULL, buf)); + /// + /// let mut translated_data = vec![]; + /// let mut failed_translations = &mut |_| true; + /// + /// let mut direct_translate = DirectTranslate::new(); + /// + /// direct_translate.virt_to_phys_iter( + /// &mut mem, + /// &translator, + /// addresses, + /// &mut translated_data.from_extend(), + /// &mut failed_translations.into(), + /// ); + /// + /// + /// // We tried to translate one byte out of the mapped memory, it had to fail + /// assert_eq!(translated_data.len(), buffer_length - 1); + /// + /// # Ok::<(), memflow::error::Error>(()) + /// ``` + fn virt_to_phys_iter( + &mut self, + phys_mem: &mut T, + translator: &D, + addrs: VI, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + ) where + T: PhysicalMemory + ?Sized, + B: SplitAtIndex, + D: VirtualTranslate3, + VI: Iterator>; + + /// Translate a single virtual address + /// + /// This function will do a virtual to physical memory translation for the + /// `VirtualTranslate3` for single address returning either PhysicalAddress, or an error. 
+ /// + /// # Examples + /// ``` + /// # use memflow::error::Result; + /// # use memflow::types::{PhysicalAddress, Address, umem}; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// # use memflow::types::size; + /// # use memflow::mem::VirtualTranslate3; + /// use memflow::mem::{VirtualTranslate2, DirectTranslate}; + /// use memflow::architecture::x86::x64; + /// + /// # const VIRT_MEM_SIZE: usize = size::mb(8); + /// # const CHUNK_SIZE: usize = 2; + /// # + /// # let mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(mem); + /// # let (dtb, virtual_base) = os.alloc_dtb(VIRT_MEM_SIZE, &[]); + /// # let mut mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let arch = x64::ARCH; + /// + /// let mut direct_translate = DirectTranslate::new(); + /// + /// // Translate a mapped address + /// let res = direct_translate.virt_to_phys( + /// &mut mem, + /// &translator, + /// virtual_base, + /// ); + /// + /// assert!(res.is_ok()); + /// + /// // Translate unmapped address + /// let res = direct_translate.virt_to_phys( + /// &mut mem, + /// &translator, + /// virtual_base - 1, + /// ); + /// + /// assert!(res.is_err()); + /// + /// ``` + fn virt_to_phys( + &mut self, + phys_mem: &mut T, + translator: &D, + vaddr: Address, + ) -> Result { + let mut output = None; + let success = &mut |elem: CTup3| { + if output.is_none() { + output = Some(elem.0); + } + false + }; + let mut output_err = None; + let fail = &mut |elem: (Error, _)| { + output_err = Some(elem.0); + true + }; + + self.virt_to_phys_iter( + phys_mem, + translator, + Some(CTup3::<_, _, umem>(vaddr, vaddr, 1)).into_iter(), + &mut success.into(), + &mut fail.into(), + ); + output.map(Ok).unwrap_or_else(|| Err(output_err.unwrap())) + } +} + +// forward impls +impl VirtualTranslate2 for P +where + T: VirtualTranslate2 + ?Sized, + P: std::ops::DerefMut + Send, +{ + #[inline] + fn virt_to_phys_iter( + &mut self, + phys_mem: &mut U, + translator: &D, + addrs: VI, + 
out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + ) where + U: PhysicalMemory + ?Sized, + B: SplitAtIndex, + D: VirtualTranslate3, + VI: Iterator>, + { + (**self).virt_to_phys_iter(phys_mem, translator, addrs, out, out_fail) + } +} + +/// Translates virtual memory to physical using internal translation base (usually a process' dtb) +/// +/// This trait abstracts virtual address translation for a single virtual memory scope. +/// On x86 architectures, it is a single `Address` - a CR3 register. But other architectures may +/// use multiple translation bases, or use a completely different translation mechanism (MIPS). +pub trait VirtualTranslate3: Clone + Copy + Send { + /// Translate a single virtual address + /// + /// # Examples + /// ``` + /// # use memflow::error::Result; + /// # use memflow::types::{PhysicalAddress, Address}; + /// # use memflow::dummy::{DummyMemory, DummyOs}; + /// use memflow::mem::VirtualTranslate3; + /// use memflow::architecture::x86::x64; + /// use memflow::types::{size, umem}; + /// + /// # const VIRT_MEM_SIZE: usize = size::mb(8); + /// # const CHUNK_SIZE: usize = 2; + /// # + /// # let mem = DummyMemory::new(size::mb(16)); + /// # let mut os = DummyOs::new(mem); + /// # let (dtb, virtual_base) = os.alloc_dtb(VIRT_MEM_SIZE, &[]); + /// # let mut mem = os.into_inner(); + /// # let translator = x64::new_translator(dtb); + /// let arch = x64::ARCH; + /// + /// // Translate a mapped address + /// let res = translator.virt_to_phys( + /// &mut mem, + /// virtual_base, + /// ); + /// + /// assert!(res.is_ok()); + /// + /// // Translate unmapped address + /// let res = translator.virt_to_phys( + /// &mut mem, + /// virtual_base - 1, + /// ); + /// + /// assert!(res.is_err()); + /// + /// ``` + fn virt_to_phys( + &self, + mem: &mut T, + addr: Address, + ) -> Result { + let mut buf: [std::mem::MaybeUninit; 512] = + unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut output = None; + let success = &mut |elem: 
CTup3| { + if output.is_none() { + output = Some(elem.0); + } + false + }; + let mut output_err = None; + let fail = &mut |elem: (Error, _)| { + output_err = Some(elem.0); + true + }; + self.virt_to_phys_iter( + mem, + Some(CTup3::<_, _, umem>(addr, addr, 1)).into_iter(), + &mut success.into(), + &mut fail.into(), + &mut buf, + ); + output.map(Ok).unwrap_or_else(|| Err(output_err.unwrap())) + } + + fn virt_to_phys_iter< + T: PhysicalMemory + ?Sized, + B: SplitAtIndex, + VI: Iterator>, + >( + &self, + mem: &mut T, + addrs: VI, + out: &mut VtopOutputCallback, + out_fail: &mut VtopFailureCallback, + tmp_buf: &mut [std::mem::MaybeUninit], + ); + + fn translation_table_id(&self, address: Address) -> umem; + + fn arch(&self) -> ArchitectureObj; +} + +pub type VtopOutputCallback<'a, B> = OpaqueCallback<'a, CTup3>; +pub type VtopFailureCallback<'a, B> = OpaqueCallback<'a, (Error, CTup3)>; diff --git a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/tests.rs b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/tests.rs index 2dea489..696f042 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/virt_translate/tests.rs +++ b/apex_dma/memflow_lib/memflow/src/mem/virt_translate/tests.rs @@ -1,35 +1,40 @@ use crate::architecture::x86::x64; - -use crate::mem::dummy::DummyMemory; -use crate::mem::{DirectTranslate, VirtualDMA, VirtualMemory, VirtualTranslate}; -use crate::types::size; +use crate::cglue::ForwardMut; +use crate::dummy::{DummyMemory, DummyOs}; +use crate::mem::{ + DirectTranslate, MemoryView, PhysicalMemory, VirtualDma, VirtualTranslate, VirtualTranslate2, + VirtualTranslate3, +}; +use crate::types::{mem, size, PageType}; +use cglue::tuple::*; #[test] fn test_vtop() { - let mut dummy_mem = DummyMemory::new(size::mb(32)); + let dummy_mem = DummyMemory::new(size::mb(32)); + let mut dummy_os = DummyOs::new(dummy_mem); let virt_size = size::mb(8); - let (dtb, virt_base) = dummy_mem.alloc_dtb(virt_size, &[]); + let (dtb, virt_base) = dummy_os.alloc_dtb(virt_size, &[]); 
let translator = x64::new_translator(dtb); let mut vat = DirectTranslate::new(); for i in (0..virt_size).step_by(128) { let virt_base = virt_base + i; - let vtop = match vat.virt_to_phys(&mut dummy_mem, &translator, virt_base) { + let vtop = match vat.virt_to_phys(dummy_os.as_mut(), &translator, virt_base) { Err(_) => None, Ok(paddr) => Some(paddr.address()), }; - let dummy_vtop = dummy_mem.vtop(dtb, virt_base); + let dummy_vtop = dummy_os.vtop(dtb, virt_base); assert_eq!(vtop, dummy_vtop); } - for i in 0..128 { + for i in 0_u64..128 { let virt_base = virt_base + virt_size + i; - let vtop = match vat.virt_to_phys(&mut dummy_mem, &translator, virt_base) { + let vtop = match vat.virt_to_phys(dummy_os.as_mut(), &translator, virt_base) { Err(_) => None, Ok(paddr) => Some(paddr.address()), }; - let dummy_vtop = dummy_mem.vtop(dtb, virt_base); + let dummy_vtop = dummy_os.vtop(dtb, virt_base); assert!(vtop.is_none()); @@ -38,11 +43,11 @@ fn test_vtop() { for i in 0..128 { let virt_base = virt_base - i; - let vtop = match vat.virt_to_phys(&mut dummy_mem, &translator, virt_base) { + let vtop = match vat.virt_to_phys(dummy_os.as_mut(), &translator, virt_base) { Err(_) => None, Ok(paddr) => Some(paddr.address()), }; - let dummy_vtop = dummy_mem.vtop(dtb, virt_base); + let dummy_vtop = dummy_os.vtop(dtb, virt_base); assert!(i == 0 || vtop.is_none()); @@ -50,255 +55,279 @@ fn test_vtop() { } } +#[test] +fn test_x86_flag_inheritance() { + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); + let (dtb, virt_base) = dummy_os.alloc_dtb(size::mb(2), &[]); + let translator = x64::new_translator(dtb); + + let paddr = translator.virt_to_phys(&mut dummy_os, virt_base).unwrap(); + assert!(!paddr.page_type.contains(PageType::NOEXEC)); + + { + let mut phys_view = dummy_os.phys_view(); + let dtb_idx = (virt_base.to_umem() as u64 >> 39) & 0x1ffu64; + let pte = phys_view.read_addr64(dtb + dtb_idx * 8).unwrap().to_umem(); + // Set nx bit + let pte = 
pte | !(!0u64 >> 1); + phys_view.write(dtb + dtb_idx * 8, &pte).unwrap(); + } + + let paddr = translator.virt_to_phys(&mut dummy_os, virt_base).unwrap(); + assert!(paddr.page_type.contains(PageType::NOEXEC)); +} + #[test] fn test_virt_page_map() { - let mut dummy_mem = DummyMemory::new(size::mb(16)); - let (dtb, virt_base) = dummy_mem.alloc_dtb(size::mb(2), &[]); + let dummy_mem = DummyMemory::new(size::mb(16)); + let mut dummy_os = DummyOs::new(dummy_mem); + let (dtb, virt_base) = dummy_os.alloc_dtb(size::mb(2), &[]); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - let page_map = virt_mem.virt_page_map(0); + let page_map = virt_mem.virt_page_map_vec(0); - for (addr, len) in page_map.iter() { - println!("{:x}-{:x} ({:x})", addr, *addr + *len, len); + for CTup3(address, size, pt) in page_map.iter() { + println!("{:x}-{:x} ({:x}) {:?}", address, *address + *size, size, pt); } assert!(page_map.len() == 1); assert_eq!(page_map[0].0, virt_base); - assert_eq!(page_map[0].1, size::mb(2)); + assert_eq!(page_map[0].1, mem::mb(2)); } #[test] fn test_virt_read_small() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 256]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len()]; - virt_mem.virt_read_into(virt_base, &mut out[..]).unwrap(); + virt_mem.read_into(virt_base, &mut out[..]).unwrap(); 
assert_eq!(buf.len(), out.len()); assert_eq!(buf, out); } #[test] fn test_virt_write_small() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 256]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), &input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base, &input[..]).unwrap(); - virt_mem.virt_read_into(virt_base, &mut buf[..]).unwrap(); + virt_mem.write(virt_base, &input[..]).unwrap(); + virt_mem.read_into(virt_base, &mut buf[..]).unwrap(); assert_eq!(buf.len(), input.len()); assert_eq!(buf, input); } #[test] fn test_virt_read_small_shifted() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 256]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len() - 128]; - virt_mem - .virt_read_into(virt_base + 128, &mut out[..]) - .unwrap(); + virt_mem.read_into(virt_base + 128, &mut out[..]).unwrap(); assert_eq!(buf[128..].to_vec().len(), out.len()); assert_eq!(buf[128..].to_vec(), out); } #[test] fn test_virt_write_small_shifted() { - let mut dummy_mem = 
DummyMemory::new(size::mb(2)); - let mut buf = vec![0u8; 128]; + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); + let mut buf = [0u8; 128]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), &input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base + 128, &input[..]).unwrap(); - virt_mem - .virt_read_into(virt_base + 128, &mut buf[..]) - .unwrap(); + virt_mem.write(virt_base + 128, &input[..]).unwrap(); + virt_mem.read_into(virt_base + 128, &mut buf[..]).unwrap(); assert_eq!(buf.to_vec().len(), input.len()); assert_eq!(buf.to_vec(), input); } #[test] fn test_virt_read_medium() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len()]; - virt_mem.virt_read_into(virt_base, &mut out[..]).unwrap(); + virt_mem.read_into(virt_base, &mut out[..]).unwrap(); assert_eq!(buf.len(), out.len()); assert_eq!(buf, out); } #[test] fn test_virt_write_medium() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let 
mut buf = vec![0u8; 0x1000]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), &input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base, &input[..]).unwrap(); - virt_mem.virt_read_into(virt_base, &mut buf[..]).unwrap(); + virt_mem.write(virt_base, &input[..]).unwrap(); + virt_mem.read_into(virt_base, &mut buf[..]).unwrap(); assert_eq!(buf.len(), input.len()); assert_eq!(buf, input); } #[test] fn test_virt_read_medium_shifted() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len() - 0x100]; - virt_mem - .virt_read_into(virt_base + 0x100, &mut out[..]) - .unwrap(); + virt_mem.read_into(virt_base + 0x100, &mut out[..]).unwrap(); assert_eq!(buf[0x100..].to_vec().len(), out.len()); assert_eq!(buf[0x100..].to_vec(), out); } #[test] fn test_virt_write_medium_shifted() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000 - 0x100]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { 
*item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), &input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base + 0x100, &input[..]).unwrap(); - virt_mem - .virt_read_into(virt_base + 0x100, &mut buf[..]) - .unwrap(); + virt_mem.write(virt_base + 0x100, &input[..]).unwrap(); + virt_mem.read_into(virt_base + 0x100, &mut buf[..]).unwrap(); assert_eq!(buf.to_vec().len(), input.len()); assert_eq!(buf.to_vec(), input); } #[test] fn test_virt_read_big() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000 * 16]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len()]; - virt_mem.virt_read_into(virt_base, &mut out[..]).unwrap(); + virt_mem.read_into(virt_base, &mut out[..]).unwrap(); assert_eq!(buf.len(), out.len()); assert_eq!(buf, out); } #[test] fn test_virt_write_big() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000 * 16]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), 
&input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base, &input[..]).unwrap(); - virt_mem.virt_read_into(virt_base, &mut buf[..]).unwrap(); + virt_mem.write(virt_base, &input[..]).unwrap(); + virt_mem.read_into(virt_base, &mut buf[..]).unwrap(); assert_eq!(buf.len(), input.len()); assert_eq!(buf, input); } #[test] fn test_virt_read_big_shifted() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000 * 16]; for (i, item) in buf.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(buf.len(), &buf); + let (dtb, virt_base) = dummy_os.alloc_dtb(buf.len(), &buf); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); let mut out = vec![0u8; buf.len() - 0x100]; - virt_mem - .virt_read_into(virt_base + 0x100, &mut out[..]) - .unwrap(); + virt_mem.read_into(virt_base + 0x100, &mut out[..]).unwrap(); assert_eq!(buf[0x100..].to_vec().len(), out.len()); assert_eq!(buf[0x100..].to_vec(), out); } #[test] fn test_virt_write_big_shifted() { - let mut dummy_mem = DummyMemory::new(size::mb(2)); + let dummy_mem = DummyMemory::new(size::mb(2)); + let mut dummy_os = DummyOs::new(dummy_mem); let mut buf = vec![0u8; 0x1000 * 16 - 0x100]; let mut input = vec![0u8; buf.len()]; for (i, item) in input.iter_mut().enumerate() { *item = i as u8; } - let (dtb, virt_base) = dummy_mem.alloc_dtb(input.len(), &input); + let (dtb, virt_base) = dummy_os.alloc_dtb(input.len(), &input); let translator = x64::new_translator(dtb); let arch = x64::ARCH; - let mut virt_mem = 
VirtualDMA::new(&mut dummy_mem, arch, translator); + let mut virt_mem = VirtualDma::new(dummy_os.forward_mut(), arch, translator); - virt_mem.virt_write(virt_base + 0x100, &input[..]).unwrap(); - virt_mem - .virt_read_into(virt_base + 0x100, &mut buf[..]) - .unwrap(); + virt_mem.write(virt_base + 0x100, &input[..]).unwrap(); + virt_mem.read_into(virt_base + 0x100, &mut buf[..]).unwrap(); assert_eq!(buf.to_vec().len(), input.len()); assert_eq!(buf.to_vec(), input); } diff --git a/apex_dma/memflow_lib/memflow/src/os/keyboard.rs b/apex_dma/memflow_lib/memflow/src/os/keyboard.rs new file mode 100644 index 0000000..463f8de --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/keyboard.rs @@ -0,0 +1,41 @@ +//! Describes optional keyboard input for a Operating System + +use crate::cglue::*; +use crate::prelude::v1::Result; + +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +pub trait OsKeyboard: Send { + #[wrap_with_obj(crate::os::keyboard::Keyboard)] + type KeyboardType<'a>: crate::os::keyboard::Keyboard + 'a + where + Self: 'a; + #[wrap_with_group(crate::os::keyboard::IntoKeyboard)] + type IntoKeyboardType: crate::os::keyboard::Keyboard + Clone + 'static; + + fn keyboard(&mut self) -> Result>; + fn into_keyboard(self) -> Result; +} + +#[cfg(feature = "plugins")] +cglue_trait_group!(IntoKeyboard, { Keyboard, Clone }, {}); + +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +#[cglue_forward] +pub trait Keyboard { + #[wrap_with_obj(crate::os::keyboard::KeyboardState)] + type KeyboardStateType: crate::os::keyboard::KeyboardState; + + fn is_down(&mut self, vk: i32) -> bool; + fn set_down(&mut self, vk: i32, down: bool); + + fn state(&mut self) -> Result; +} + +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +#[cglue_forward] +pub trait KeyboardState { + fn is_down(&self, vk: i32) -> bool; +} diff --git a/apex_dma/memflow_lib/memflow/src/os/mod.rs b/apex_dma/memflow_lib/memflow/src/os/mod.rs new file mode 100644 index 0000000..0921b16 
--- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/mod.rs @@ -0,0 +1,39 @@ +//! Describes an operating system in high level. +//! +//! Currently there are 3 key parts describing an OS, each subsetting the previous level: +//! * `OS` +//! * `Process` +//! * `ModuleInfo` +//! +//! `OS` abstracts away the very root of the system. Often times, the underlying object is a OS +//! kernel, but it should not be a concern, because it is designed to also work with various non-OS +//! systems like UEFI firmware, as well as pseudo implementations that use native system calls. +//! +//! `Process` abstracts away a single process. It provides memory access, module lists, and more. +//! +//! `ModuleInfo` currently is just an information block, without any memory access, or special +//! functions. It might be wise to implement helpers for exported functions, memory protection +//! flags, and other things concerned with individual modules. + +pub mod keyboard; +pub mod module; +pub mod process; +pub mod root; +pub mod util; + +pub use keyboard::{Keyboard, KeyboardState, OsKeyboard}; + +pub use module::{ + ExportCallback, ExportInfo, ImportCallback, ImportInfo, ModuleAddressCallback, + ModuleAddressInfo, ModuleInfo, ModuleInfoCallback, SectionCallback, SectionInfo, +}; + +pub use process::{Pid, Process, ProcessInfo, ProcessInfoCallback, ProcessState}; + +pub use root::{Os, OsInfo}; + +use crate::types::Address; + +use crate::cglue::*; + +pub type AddressCallback<'a> = OpaqueCallback<'a, Address>; diff --git a/apex_dma/memflow_lib/memflow/src/os/module.rs b/apex_dma/memflow_lib/memflow/src/os/module.rs new file mode 100644 index 0000000..b756344 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/module.rs @@ -0,0 +1,123 @@ +//! 
Describes modules + +use crate::prelude::v1::*; + +/// Module information structure +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct ModuleInfo { + /// Returns the address of the module header. + /// + /// # Remarks + /// + /// On Windows this will be the address where the [`PEB`](https://docs.microsoft.com/en-us/windows/win32/api/winternl/ns-winternl-peb) entry is stored. + pub address: Address, + /// The base address of the parent process. + /// + /// # Remarks + /// + /// This field is analog to the `ProcessInfo::address` field. + pub parent_process: Address, + /// The actual base address of this module. + /// + /// # Remarks + /// + /// The base address is contained in the virtual address range of the process + /// this module belongs to. + pub base: Address, + /// Size of the module + pub size: umem, + /// Name of the module + pub name: ReprCString, + /// Path of the module + pub path: ReprCString, + /// Architecture of the module + /// + /// # Remarks + /// + /// Emulated processes often have 2 separate lists of modules, one visible to the emulated + /// context (e.g. all 32-bit modules in a WoW64 process), and the other for all native modules + /// needed to support the process emulation. This should be equal to either + /// `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch` of the parent process. 
+ pub arch: ArchitectureIdent, +} + +pub type ModuleInfoCallback<'a> = OpaqueCallback<'a, ModuleInfo>; + +/// Pair of address and architecture used for callbacks +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct ModuleAddressInfo { + pub address: Address, + pub arch: ArchitectureIdent, +} + +pub type ModuleAddressCallback<'a> = OpaqueCallback<'a, ModuleAddressInfo>; + +/// Import information structure +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct ImportInfo { + /// Name of the import + pub name: ReprCString, + /// Offset of this import from the containing modules base address + pub offset: umem, +} + +pub type ImportCallback<'a> = OpaqueCallback<'a, ImportInfo>; + +/// Export information structure +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct ExportInfo { + /// Name of the export + pub name: ReprCString, + /// Offset of this export from the containing modules base address + pub offset: umem, +} + +pub type ExportCallback<'a> = OpaqueCallback<'a, ExportInfo>; + +/// Section information structure +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct SectionInfo { + /// Name of the section + pub name: ReprCString, + /// Virtual address of this section (essentially module_info.base + virtual_address) + pub base: Address, + /// Size of this section + pub size: umem, +} + +impl SectionInfo { + /// Checks whether this section is of given name, ignoring '.' or '__' prefix. 
+ pub fn is_section(&self, name: &str) -> bool { + let mut n = self.name.as_ref(); + if let Some(stripped) = n.strip_prefix('.') { + n = stripped; + } else if let Some(stripped) = n.strip_prefix("__") { + n = stripped; + } else { + return false; + } + n == name + } + + /// Checks whether given section is 'text', ignoring prefix. + pub fn is_text(&self) -> bool { + self.is_section("text") + } +} + +pub type SectionCallback<'a> = OpaqueCallback<'a, SectionInfo>; diff --git a/apex_dma/memflow_lib/memflow/src/os/process.rs b/apex_dma/memflow_lib/memflow/src/os/process.rs new file mode 100644 index 0000000..e811a6c --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/process.rs @@ -0,0 +1,364 @@ +//! Describes process context + +use super::{ + ExportCallback, ExportInfo, ImportCallback, ImportInfo, ModuleAddressInfo, ModuleInfo, + ModuleInfoCallback, SectionCallback, SectionInfo, +}; +use crate::cglue::*; +use crate::prelude::v1::{Result, *}; +use std::prelude::v1::*; + +/// Type meant for process IDs +/// +/// If there is a case where Pid can be over 32-bit limit, or negative, please open an issue, we +/// would love to see that. +pub type Pid = u32; + +/// Exit code of a process +pub type ExitCode = i32; + +/// The state of a process +/// +/// # Remarks +/// +/// In case the exit code isn't known ProcessState::Unknown is set. 
+#[repr(C)] +#[derive(Debug, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub enum ProcessState { + Unknown, + Alive, + Dead(ExitCode), +} + +impl ProcessState { + pub fn is_alive(&self) -> bool { + matches!(*self, ProcessState::Alive) + } + + pub fn is_dead(&self) -> bool { + matches!(*self, ProcessState::Dead(_)) + } + + pub fn is_unknown(&self) -> bool { + matches!(*self, ProcessState::Unknown) + } +} + +/// Provides all actions on processes +/// +/// This trait provides a lot of typical functionality for processes, such as memory access, module lists, and basic information. +/// +/// Future expansions could include threads, keyboard input, and more. +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +pub trait Process: Send { + /// Retrieves the state of the process + fn state(&mut self) -> ProcessState; + + /// Changes the dtb this process uses for memory translations + /// + /// # Remarks + /// + /// In case the architecture only uses a single dtb for translation the second parameter should be set to `Address::invalid()`. + fn set_dtb(&mut self, dtb1: Address, dtb2: Address) -> Result<()>; + + /// Walks the process' module list and calls the provided callback for each module structure + /// address + /// + /// # Arguments + /// * `target_arch` - sets which architecture to retrieve the modules for (if emulated). Choose + /// between `Some(ProcessInfo::sys_arch())`, and `Some(ProcessInfo::proc_arch())`. `None` for all. + /// * `callback` - where to pass each matching module to. This is an opaque callback. 
+ fn module_address_list_callback( + &mut self, + target_arch: Option<&ArchitectureIdent>, + callback: ModuleAddressCallback, + ) -> Result<()>; + + /// Walks the process' module list and calls the provided callback for each module + /// + /// # Arguments + /// * `target_arch` - sets which architecture to retrieve the modules for (if emulated). Choose + /// between `Some(ProcessInfo::sys_arch())`, and `Some(ProcessInfo::proc_arch())`. `None` for all. + /// * `callback` - where to pass each matching module to. This is an opaque callback. + fn module_list_callback( + &mut self, + target_arch: Option<&ArchitectureIdent>, + mut callback: ModuleInfoCallback, + ) -> Result<()> { + // This is safe, because control will flow back to the callback. + let sptr = self as *mut Self; + let inner_callback = &mut |ModuleAddressInfo { address, arch }| match unsafe { &mut *sptr } + .module_by_address(address, arch) + { + Ok(info) => callback.call(info), + Err(e) => { + log::trace!("Error when reading module {:x} {:?}", address, e); + true // continue iteration + } + }; + unsafe { sptr.as_mut().unwrap() } + .module_address_list_callback(target_arch, inner_callback.into()) + } + + /// Retrieves a module by its structure address and architecture + /// + /// # Arguments + /// * `address` - address where module's information resides in + /// * `architecture` - architecture of the module. Should be either `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch`. + fn module_by_address( + &mut self, + address: Address, + architecture: ArchitectureIdent, + ) -> Result; + + /// Finds a process module by its name under specified architecture + /// + /// This function can be useful for quickly accessing a specific module + /// + /// # Arguments + /// * `name` - name of the module to find + /// * `architecture` - architecture of the module. Should be either `ProcessInfo::proc_arch`, or `ProcessInfo::sys_arch`, or None for both. 
+ fn module_by_name_arch( + &mut self, + name: &str, + architecture: Option<&ArchitectureIdent>, + ) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ModuleNotFound)); + let callback = &mut |data: ModuleInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_list_callback(architecture, callback.into())?; + ret + } + + /// Finds any architecture process module by its name + /// + /// This function can be useful for quickly accessing a specific module + /// + /// # Arguments + /// * `name` - name of the module to find + fn module_by_name(&mut self, name: &str) -> Result { + self.module_by_name_arch(name, None) + } + + /// Retrieves a module list for the process + /// + /// # Arguments + /// * `target_arch` - sets which architecture to retrieve the modules for (if emulated). Choose + /// between `Some(ProcessInfo::sys_arch())`, and `Some(ProcessInfo::proc_arch())`. `None` for all. + #[skip_func] + fn module_list_arch( + &mut self, + target_arch: Option<&ArchitectureIdent>, + ) -> Result> { + let mut ret = vec![]; + self.module_list_callback(target_arch, (&mut ret).into())?; + Ok(ret) + } + + /// Retrieves a module list for the process + /// + /// This is equivalent to `Process::module_list_arch(None)` + #[skip_func] + fn module_list(&mut self) -> Result> { + self.module_list_arch(None) + } + + /// Retrieves address of the primary module structure of the process + /// + /// This will generally be for the initial executable that was run + fn primary_module_address(&mut self) -> Result
; + + /// Retrieves information for the primary module of the process + /// + /// This will generally be the initial executable that was run + fn primary_module(&mut self) -> Result { + let addr = self.primary_module_address()?; + self.module_by_address(addr, self.info().proc_arch) + } + + /// Retrieves a list of all imports of a given module + fn module_import_list_callback( + &mut self, + info: &ModuleInfo, + callback: ImportCallback, + ) -> Result<()>; + + /// Retrieves a list of all exports of a given module + fn module_export_list_callback( + &mut self, + info: &ModuleInfo, + callback: ExportCallback, + ) -> Result<()>; + + /// Retrieves a list of all sections of a given module + fn module_section_list_callback( + &mut self, + info: &ModuleInfo, + callback: SectionCallback, + ) -> Result<()>; + + /// Retrieves a list of all imports of a given module + #[skip_func] + fn module_import_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_import_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Retrieves a list of all exports of a given module + #[skip_func] + fn module_export_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_export_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Retrieves a list of all sections of a given module + #[skip_func] + fn module_section_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_section_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Finds a single import of a given module by its name + fn module_import_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: ImportInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_import_list_callback(info, callback.into())?; + ret + } + + /// Finds a single export of a given module 
by its name + fn module_export_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: ExportInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_export_list_callback(info, callback.into())?; + ret + } + + /// Finds a single section of a given module by its name + fn module_section_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: SectionInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_section_list_callback(info, callback.into())?; + ret + } + + /// Retrieves the process info + fn info(&self) -> &ProcessInfo; + + fn mapped_mem_range( + &mut self, + gap_size: imem, + start: Address, + end: Address, + out: MemoryRangeCallback, + ); + + #[skip_func] + fn mapped_mem_range_vec( + &mut self, + gap_size: imem, + start: Address, + end: Address, + ) -> Vec { + let mut out = vec![]; + self.mapped_mem_range(gap_size, start, end, (&mut out).into()); + out + } + + fn mapped_mem(&mut self, gap_size: imem, out: MemoryRangeCallback) { + self.mapped_mem_range(gap_size, Address::null(), Address::invalid(), out) + } + + #[skip_func] + fn mapped_mem_vec(&mut self, gap_size: imem) -> Vec { + let mut out = vec![]; + self.mapped_mem(gap_size, (&mut out).into()); + out + } +} + +/// Process information structure +/// +/// This structure implements basic process information. Architectures are provided both of the +/// system, and of the process. +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct ProcessInfo { + /// The base address of this process. 
+ /// + /// # Remarks + /// + /// On Windows this will be the address of the [`_EPROCESS`](https://www.nirsoft.net/kernel_struct/vista/EPROCESS.html) structure. + pub address: Address, + /// ID of this process. + pub pid: Pid, + /// The current status of the process at the time when this process info was fetched. + /// + /// # Remarks + /// + /// This field is highly volatile and can be re-checked with the [`Process::state()`] function. + pub state: ProcessState, + /// Name of the process. + pub name: ReprCString, + /// Path of the process binary + pub path: ReprCString, + /// Command line the process was started with. + pub command_line: ReprCString, + /// System architecture of the target system. + pub sys_arch: ArchitectureIdent, + /// Process architecture + /// + /// # Remarks + /// + /// Specifically on 64-bit systems this could be different + /// to the `sys_arch` in case the process is an emulated 32-bit process. + /// + /// On windows this technique is called [`WOW64`](https://docs.microsoft.com/en-us/windows/win32/winprog64/wow64-implementation-details). + pub proc_arch: ArchitectureIdent, + /// Directory Table Base + /// + /// # Remarks + /// + /// These fields contain the translation base used to translate virtual memory addresses into physical memory addresses. + /// On x86 systems only `dtb1` is set because only one dtb is used. + /// On arm systems both `dtb1` and `dtb2` are set to their corresponding values. + pub dtb1: Address, + pub dtb2: Address, +} + +pub type ProcessInfoCallback<'a> = OpaqueCallback<'a, ProcessInfo>; diff --git a/apex_dma/memflow_lib/memflow/src/os/root.rs b/apex_dma/memflow_lib/memflow/src/os/root.rs new file mode 100644 index 0000000..2cbb067 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/root.rs @@ -0,0 +1,387 @@ +//! 
Describes the root of the Operating System + +use super::process::*; +use super::{AddressCallback, ProcessInfo, ProcessInfoCallback}; + +use crate::prelude::v1::{Result, *}; + +use crate::cglue::*; +use std::prelude::v1::*; + +/// High level OS trait implemented by OS layers. +/// +/// This trait provides all necessary functions for handling an OS, retrieving processes, and +/// moving resources into processes. +/// +/// There are also methods for accessing system level modules. +#[cfg_attr(feature = "plugins", cglue_trait)] +#[int_result] +pub trait Os: Send { + #[wrap_with_group(crate::plugins::os::ProcessInstance)] + type ProcessType<'a>: crate::os::process::Process + MemoryView + 'a + where + Self: 'a; + #[wrap_with_group(crate::plugins::os::IntoProcessInstance)] + type IntoProcessType: crate::os::process::Process + MemoryView + Clone + 'static; + + /// Walks a process list and calls a callback for each process structure address + /// + /// The callback is fully opaque. We need this style so that C FFI can work seamlessly. + fn process_address_list_callback(&mut self, callback: AddressCallback) -> Result<()>; + + /// Retrieves a process address list + /// + /// This will be a list of unique internal addresses for underlying process structures + #[skip_func] + fn process_address_list(&mut self) -> Result> { + let mut ret = vec![]; + self.process_address_list_callback((&mut ret).into())?; + Ok(ret) + } + + /// Walks a process list and calls a callback for each process + /// + /// The callback is fully opaque. We need this style so that C FFI can work seamlessly. + fn process_info_list_callback(&mut self, mut callback: ProcessInfoCallback) -> Result<()> { + // This is safe, because control will flow back to the callback. 
+ let sptr = self as *mut Self; + let inner_callback = &mut |addr| match unsafe { &mut *sptr }.process_info_by_address(addr) { + Ok(info) => callback.call(info), + Err(Error(_, ErrorKind::PartialData)) => { + log::trace!("Partial error when reading process {:x}", addr); + true + } + Err(e) => { + log::trace!("Error when reading process {:x} {:?}", addr, e); + false + } + }; + unsafe { sptr.as_mut().unwrap() }.process_address_list_callback(inner_callback.into()) + } + + /// Retrieves a process list + #[skip_func] + fn process_info_list(&mut self) -> Result> { + let mut ret = vec![]; + self.process_info_list_callback((&mut ret).into())?; + Ok(ret) + } + + /// Find process information by its internal address + fn process_info_by_address(&mut self, address: Address) -> Result; + + /// Find process information by its name + /// + /// # Remarks: + /// + /// This function only returns processes whose state is not [`ProcessState::Dead`]. + fn process_info_by_name(&mut self, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound)); + let callback = &mut |data: ProcessInfo| { + if (data.state == ProcessState::Unknown || data.state == ProcessState::Alive) + && data.name.as_ref() == name + { + ret = Ok(data); + false + } else { + true + } + }; + self.process_info_list_callback(callback.into())?; + ret + } + + /// Find process information by its ID + fn process_info_by_pid(&mut self, pid: Pid) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound)); + let callback = &mut |data: ProcessInfo| { + if data.pid == pid { + ret = Ok(data); + false + } else { + true + } + }; + self.process_info_list_callback(callback.into())?; + ret + } + + /// Construct a process by its info, borrowing the OS + /// + /// It will share the underlying memory resources + fn process_by_info(&mut self, info: ProcessInfo) -> Result>; + + /// Construct a process by its info, consuming the OS + /// + /// This function will consume 
the Kernel instance and move its resources into the process + fn into_process_by_info(self, info: ProcessInfo) -> Result; + + /// Creates a process by its internal address, borrowing the OS + /// + /// It will share the underlying memory resources + /// + /// If no process with the specified address can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + fn process_by_address(&mut self, addr: Address) -> Result> { + self.process_info_by_address(addr) + .and_then(move |i| self.process_by_info(i)) + } + + /// Creates a process by its name, borrowing the OS + /// + /// It will share the underlying memory resources + /// + /// If no process with the specified name can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + /// + /// # Remarks: + /// + /// This function only returns processes whose state is not [`ProcessState::Dead`]. + fn process_by_name(&mut self, name: &str) -> Result> { + self.process_info_by_name(name) + .and_then(move |i| self.process_by_info(i)) + } + + /// Creates a process by its ID, borrowing the OS + /// + /// It will share the underlying memory resources + /// + /// If no process with the specified ID can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + fn process_by_pid(&mut self, pid: Pid) -> Result> { + self.process_info_by_pid(pid) + .and_then(move |i| self.process_by_info(i)) + } + + /// Creates a process by its internal address, consuming the OS + /// + /// It will consume the OS and not affect memory usage + /// + /// If no process with the specified address can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. 
+ fn into_process_by_address(mut self, addr: Address) -> Result + where + Self: Sized, + { + self.process_info_by_address(addr) + .and_then(|i| self.into_process_by_info(i)) + } + + /// Creates a process by its name, consuming the OS + /// + /// It will consume the OS and not affect memory usage + /// + /// If no process with the specified name can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + /// + /// # Remarks: + /// + /// This function only returns processes whose state is not [`ProcessState::Dead`]. + fn into_process_by_name(mut self, name: &str) -> Result + where + Self: Sized, + { + self.process_info_by_name(name) + .and_then(|i| self.into_process_by_info(i)) + } + + /// Creates a process by its ID, consuming the OS + /// + /// It will consume the OS and not affect memory usage + /// + /// If no process with the specified ID can be found this function will return an Error. + /// + /// This function can be useful for quickly accessing a process. + fn into_process_by_pid(mut self, pid: Pid) -> Result + where + Self: Sized, + { + self.process_info_by_pid(pid) + .and_then(|i| self.into_process_by_info(i)) + } + + /// Walks the OS module list and calls the provided callback for each module structure + /// address + /// + /// # Arguments + /// * `callback` - where to pass each matching module to. This is an opaque callback. + fn module_address_list_callback(&mut self, callback: AddressCallback) -> Result<()>; + + /// Walks the OS module list and calls the provided callback for each module + /// + /// # Arguments + /// * `callback` - where to pass each matching module to. This is an opaque callback. + fn module_list_callback(&mut self, mut callback: ModuleInfoCallback) -> Result<()> { + // This is safe, because control will flow back to the callback. 
+        let sptr = self as *mut Self;
+        let inner_callback =
+            &mut |address: Address| match unsafe { &mut *sptr }.module_by_address(address) {
+                Ok(info) => callback.call(info),
+                Err(e) => {
+                    log::trace!("Error when reading module {:x} {:?}", address, e);
+                    true // continue iteration
+                }
+            };
+        unsafe { sptr.as_mut().unwrap() }.module_address_list_callback(inner_callback.into())
+    }
+
+    /// Retrieves a module by its structure address
+    ///
+    /// # Arguments
+    /// * `address` - address where module's information resides in
+    fn module_by_address(&mut self, address: Address) -> Result<ModuleInfo>;
+
+    /// Finds an OS module by its name
+    ///
+    /// This function can be useful for quickly accessing a specific module
+    fn module_by_name(&mut self, name: &str) -> Result<ModuleInfo> {
+        let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ProcessNotFound));
+        let callback = &mut |data: ModuleInfo| {
+            if data.name.as_ref() == name {
+                ret = Ok(data);
+                false
+            } else {
+                true
+            }
+        };
+        self.module_list_callback(callback.into())?;
+        ret
+    }
+
+    /// Retrieves a module list for the OS
+    #[skip_func]
+    fn module_list(&mut self) -> Result<Vec<ModuleInfo>> {
+        let mut ret = vec![];
+        self.module_list_callback((&mut ret).into())?;
+        Ok(ret)
+    }
+
+    /// Retrieves address of the primary module of the OS
+    ///
+    /// This will generally be for the main kernel process/module
+    fn primary_module_address(&mut self) -> Result
; + + /// Retrieves information for the primary module of the OS + /// + /// This will generally be for the main kernel process/module + fn primary_module(&mut self) -> Result { + let addr = self.primary_module_address()?; + self.module_by_address(addr) + } + + /// Retrieves a list of all imports of a given module + fn module_import_list_callback( + &mut self, + info: &ModuleInfo, + callback: ImportCallback, + ) -> Result<()>; + + /// Retrieves a list of all exports of a given module + fn module_export_list_callback( + &mut self, + info: &ModuleInfo, + callback: ExportCallback, + ) -> Result<()>; + + /// Retrieves a list of all sections of a given module + fn module_section_list_callback( + &mut self, + info: &ModuleInfo, + callback: SectionCallback, + ) -> Result<()>; + + /// Retrieves a list of all imports of a given module + #[skip_func] + fn module_import_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_import_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Retrieves a list of all exports of a given module + #[skip_func] + fn module_export_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_export_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Retrieves a list of all sections of a given module + #[skip_func] + fn module_section_list(&mut self, info: &ModuleInfo) -> Result> { + let mut ret = vec![]; + self.module_section_list_callback(info, (&mut ret).into())?; + Ok(ret) + } + + /// Finds a single import of a given module by its name + fn module_import_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: ImportInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_import_list_callback(info, callback.into())?; + ret + } + + /// Finds a single export of a given module by its name + fn 
module_export_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: ExportInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_export_list_callback(info, callback.into())?; + ret + } + + /// Finds a single section of a given module by its name + fn module_section_by_name(&mut self, info: &ModuleInfo, name: &str) -> Result { + let mut ret = Err(Error(ErrorOrigin::OsLayer, ErrorKind::ImportNotFound)); + let callback = &mut |data: SectionInfo| { + if data.name.as_ref() == name { + ret = Ok(data); + false + } else { + true + } + }; + self.module_section_list_callback(info, callback.into())?; + ret + } + + /// Retrieves the OS info + fn info(&self) -> &OsInfo; +} + +/// Information block about OS +/// +/// This provides some basic information about the OS in question. `base`, and `size` may be +/// omitted in some circumstances (lack of kernel, or privileges). But architecture should always +/// be correct. +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct OsInfo { + /// Base address of the OS kernel + pub base: Address, + /// Size of the OS kernel + pub size: umem, + /// System architecture + pub arch: ArchitectureIdent, +} diff --git a/apex_dma/memflow_lib/memflow/src/os/util.rs b/apex_dma/memflow_lib/memflow/src/os/util.rs new file mode 100644 index 0000000..978e9ca --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/os/util.rs @@ -0,0 +1,491 @@ +//! Helpers for implementing several OS functions. 
+ +use crate::error::*; +use crate::mem::MemoryView; +use crate::os::*; +use crate::types::umem; +use cglue::prelude::v1::ReprCString; +use dataview::PodMethods; +use std::vec::Vec; + +#[cfg(feature = "goblin")] +use goblin::{ + container::Ctx, + elf::{dynamic, Dynamic, Elf, ProgramHeader, RelocSection, Symtab}, + mach::{exports::ExportInfo as MachExportInfo, Mach, MachO}, + pe::{options::ParseOptions, PE}, + strtab::Strtab, + Object, +}; + +fn aligned_alloc(bytes: usize) -> Vec { + vec![0; (bytes + 8 - 1) / 8] +} + +#[cfg(feature = "goblin")] +fn parse_elf(bytes: &[u8]) -> goblin::error::Result> { + let header = Elf::parse_header(bytes)?; + + let ctx = Ctx { + container: header.container()?, + le: header.endianness()?, + }; + + let program_headers = + ProgramHeader::parse(bytes, header.e_phoff as usize, header.e_phnum as usize, ctx)?; + + let dynamic = Dynamic::parse(bytes, &program_headers, ctx)?; + + let mut dynsyms = Symtab::default(); + let mut dynstrtab = Strtab::default(); + let mut dynrelas = RelocSection::default(); + let mut dynrels = RelocSection::default(); + let mut pltrelocs = RelocSection::default(); + + if let Some(ref dynamic) = dynamic { + let dyn_info = &dynamic.info; + + dynstrtab = Strtab::parse(bytes, dyn_info.strtab, dyn_info.strsz, 0x0)?; + + /*if dyn_info.soname != 0 { + // FIXME: warn! 
here + soname = dynstrtab.get_at(dyn_info.soname); + } + if dyn_info.needed_count > 0 { + libraries = dynamic.get_libraries(&dynstrtab); + }*/ + // parse the dynamic relocations + if let Ok(relas) = RelocSection::parse(bytes, dyn_info.rela, dyn_info.relasz, true, ctx) { + dynrelas = relas; + dynrels = RelocSection::parse(bytes, dyn_info.rel, dyn_info.relsz, false, ctx)?; + let is_rela = dyn_info.pltrel as u64 == dynamic::DT_RELA; + pltrelocs = + RelocSection::parse(bytes, dyn_info.jmprel, dyn_info.pltrelsz, is_rela, ctx)?; + + // TODO: support these from goblin + let mut num_syms = /*if let Some(gnu_hash) = dyn_info.gnu_hash { + gnu_hash_len(bytes, gnu_hash as usize, ctx)? + } else if let Some(hash) = dyn_info.hash { + hash_len(bytes, hash as usize, header.e_machine, ctx)? + } else*/ { + 0 + }; + let max_reloc_sym = dynrelas + .iter() + .chain(dynrels.iter()) + .chain(pltrelocs.iter()) + .fold(0, |num, reloc| core::cmp::max(num, reloc.r_sym)); + if max_reloc_sym != 0 { + num_syms = core::cmp::max(num_syms, max_reloc_sym + 1); + } + + dynsyms = Symtab::parse(bytes, dyn_info.symtab, num_syms, ctx)?; + } + } + + let mut elf = Elf::lazy_parse(header)?; + + elf.program_headers = program_headers; + elf.dynamic = dynamic; + elf.dynsyms = dynsyms; + elf.dynstrtab = dynstrtab; + elf.dynrelas = dynrelas; + elf.dynrels = dynrels; + elf.pltrelocs = pltrelocs; + + Ok(elf) +} + +#[cfg(feature = "goblin")] +fn custom_parse(buf: &[u8]) -> Result> { + PE::parse_with_opts( + buf, + &ParseOptions { + resolve_rva: false, + parse_attribute_certificates: false, + }, + ) + .map(Object::PE) + .map_err(|e| { + log::debug!("PE: {}", e); + e + }) + .or_else(|_| parse_elf(buf).map(Object::Elf)) + .map_err(|e| { + log::debug!("Elf: {}", e); + e + }) + .or_else(|_| { + // Until https://github.com/m4b/goblin/pull/386 is merged + #[cfg(feature = "unstable_goblin_lossy_macho")] + return Mach::parse_2(buf, true).map(Object::Mach); + #[cfg(not(feature = "unstable_goblin_lossy_macho"))] + return 
Mach::parse(buf).map(Object::Mach); + }) + .map_err(|e| { + log::debug!("Mach: {}", e); + e + }) + .map_err(|_| Error(ErrorOrigin::OsLayer, ErrorKind::InvalidExeFile)) +} + +#[cfg(feature = "goblin")] +fn macho_base(bin: &MachO) -> Option { + let s = bin.segments.sections().flatten().next()?.ok()?.0; + Some(s.addr as umem) +} + +#[inline] +pub fn module_import_list_callback( + mem: &mut impl MemoryView, + info: &ModuleInfo, + callback: ImportCallback, +) -> Result<()> { + import_list_callback(mem, info.base, info.size, callback) +} + +pub fn import_list_callback( + mem: &mut impl MemoryView, + base: Address, + size: umem, + mut callback: ImportCallback, +) -> Result<()> { + let mut module_image = aligned_alloc(size as usize); + let module_image = module_image.as_bytes_mut(); + + mem.read_raw_into(base, module_image).data_part()?; + + fn import_call(iter: impl Iterator, callback: &mut ImportCallback) { + iter.take_while(|(offset, name)| { + callback.call(ImportInfo { + name: name.clone(), + offset: *offset, + }) + }) + .for_each(|_| {}); + } + + let ret = Err(Error::from(ErrorKind::NotImplemented)); + + #[cfg(feature = "pelite")] + let ret = ret.or_else(|_| { + if let Ok(pe) = pelite::PeView::from_bytes(module_image) { + use pelite::pe32::imports::Import as Import32; + use pelite::pe64::imports::Import as Import64; + use pelite::Wrap::*; + + if let Some(imports) = pe + .iat() + .map(Some) + .or_else(|e| { + if let pelite::Error::Null = e { + Ok(None) + } else { + Err(e) + } + }) + .map_err(|_| ErrorKind::InvalidExeFile)? + { + let iter = imports + .iter() + .filter_map(|w| match w { + T32((addr, Ok(Import32::ByName { name, .. }))) => { + Some((*addr as umem, name)) + } + T64((addr, Ok(Import64::ByName { name, .. 
}))) => { + Some((*addr as umem, name)) + } + _ => None, + }) + .filter_map(|(a, n)| n.to_str().ok().map(|n| (a, n.into()))); + + import_call(iter, &mut callback); + } + + Ok(()) + } else { + Err(Error::from(ErrorKind::InvalidExeFile)) + } + }); + + #[cfg(feature = "goblin")] + let ret = ret.or_else(|_| match custom_parse(module_image)? { + Object::Elf(elf) => { + let iter = elf + .dynsyms + .iter() + .filter(|s| s.is_import()) + .filter_map(|s| { + elf.dynstrtab + .get_at(s.st_name) + .map(|n| (s.st_value as umem, ReprCString::from(n))) + }); + + import_call(iter, &mut callback); + + Ok(()) + } + Object::PE(pe) => { + let iter = pe + .imports + .iter() + .map(|e| (e.offset as umem, e.name.as_ref().into())); + + import_call(iter, &mut callback); + + Ok(()) + } + Object::Mach(Mach::Binary(bin)) => { + let mbase = macho_base(&bin).unwrap_or_default(); + + let iter = bin + .imports() + .ok() + .into_iter() + .flatten() + .map(|v| ((v.address as umem) - mbase + base.to_umem(), v.name.into())); + + import_call(iter, &mut callback); + + Ok(()) + } + _ => Err(ErrorKind::InvalidExeFile.into()), + }); + + ret +} + +#[inline] +pub fn module_export_list_callback( + mem: &mut impl MemoryView, + info: &ModuleInfo, + callback: ExportCallback, +) -> Result<()> { + export_list_callback(mem, info.base, info.size, callback) +} + +pub fn export_list_callback( + mem: &mut impl MemoryView, + base: Address, + size: umem, + mut callback: ExportCallback, +) -> Result<()> { + let mut module_image = aligned_alloc(size as usize); + let module_image = module_image.as_bytes_mut(); + + mem.read_raw_into(base, module_image).data_part()?; + + fn export_call(iter: impl Iterator, callback: &mut ExportCallback) { + iter.take_while(|(offset, name)| { + callback.call(ExportInfo { + name: name.clone(), + offset: *offset, + }) + }) + .for_each(|_| {}); + } + + let ret = Err(Error::from(ErrorKind::NotImplemented)); + + #[cfg(feature = "pelite")] + let ret = ret.or_else(|_| { + if let Ok(pe) = 
pelite::PeView::from_bytes(module_image) { + use pelite::pe64::exports::Export; + + if let Some(exports) = pe + .exports() + .map(Some) + .or_else(|e| { + if let pelite::Error::Null = e { + Ok(None) + } else { + Err(e) + } + }) + .map_err(|e| log::debug!("pelite: {}", e)) + .map_err(|_| ErrorKind::InvalidExeFile)? + { + let exports = exports + .by() + .map_err(|e| log::debug!("pelite: {}", e)) + .map_err(|_| ErrorKind::InvalidExeFile)?; + + let iter = exports + .iter_names() + .filter_map(|(n, e)| n.ok().zip(e.ok())) + .filter_map(|(n, e)| match e { + Export::Symbol(off) => Some((*off as umem, n)), + _ => None, + }) + .filter_map(|(o, n)| n.to_str().ok().map(|n| (o, n.into()))); + + export_call(iter, &mut callback); + } + + Ok(()) + } else { + Err(Error::from(ErrorKind::InvalidExeFile)) + } + }); + + #[cfg(feature = "goblin")] + let ret = ret.or_else(|_| match custom_parse(module_image)? { + Object::Elf(elf) => { + let iter = elf + .dynsyms + .iter() + .filter(|s| !s.is_import()) + .filter_map(|s| { + elf.dynstrtab + .get_at(s.st_name) + .map(|n| (s.st_value as umem, ReprCString::from(n))) + }); + + export_call(iter, &mut callback); + + Ok(()) + } + Object::PE(pe) => { + let iter = pe.exports.iter().filter_map(|e| { + e.name + .map(|name| (e.offset.unwrap_or(0usize) as umem, name.into())) + }); + + export_call(iter, &mut callback); + + Ok(()) + } + Object::Mach(Mach::Binary(bin)) => { + let mbase = macho_base(&bin).unwrap_or_default(); + + let iter = bin.exports().ok().into_iter().flatten().filter_map(|v| { + let MachExportInfo::Regular { address, .. 
} = v.info else { + return None; + }; + + Some(((address as umem) - mbase + base.to_umem(), v.name.into())) + }); + + export_call(iter, &mut callback); + + Ok(()) + } + _ => Err(ErrorKind::InvalidExeFile.into()), + }); + + ret +} + +#[inline] +pub fn module_section_list_callback( + mem: &mut impl MemoryView, + info: &ModuleInfo, + callback: SectionCallback, +) -> Result<()> { + section_list_callback(mem, info.base, info.size, callback) +} + +pub fn section_list_callback( + mem: &mut impl MemoryView, + base: Address, + size: umem, + mut callback: SectionCallback, +) -> Result<()> { + let mut module_image = aligned_alloc(size as usize); + let module_image = module_image.as_bytes_mut(); + + mem.read_raw_into(base, module_image).data_part()?; + + fn section_call( + iter: impl Iterator, + callback: &mut SectionCallback, + base: Address, + ) { + iter.take_while(|(section_base, section_size, name)| { + callback.call(SectionInfo { + name: name.clone(), + base: base + *section_base, + size: *section_size, + }) + }) + .for_each(|_| {}); + } + + let ret = Err(Error::from(ErrorKind::NotImplemented)); + + #[cfg(feature = "pelite")] + let ret = ret.or_else(|_| { + if let Ok(pe) = pelite::PeView::from_bytes(module_image) { + let iter = pe.section_headers().iter().filter_map(|sh| { + sh.name().ok().map(|name| { + ( + sh.virtual_range().start as umem, + sh.virtual_range().end as umem, + name.into(), + ) + }) + }); + + section_call(iter, &mut callback, base); + + Ok(()) + } else { + Err(Error::from(ErrorKind::InvalidExeFile)) + } + }); + + #[cfg(feature = "goblin")] + let ret = ret.or_else(|_| match custom_parse(module_image)? 
{ + Object::Elf(elf) => { + let iter = elf.section_headers.iter().filter_map(|s| { + elf.shdr_strtab + .get_at(s.sh_name) + .map(|n| (s.sh_addr as umem, s.sh_size as umem, ReprCString::from(n))) + }); + + section_call(iter, &mut callback, base); + + Ok(()) + } + Object::PE(pe) => { + let iter = pe.sections.iter().filter_map(|e| { + e.real_name.as_ref().map(|name| { + ( + e.virtual_address as umem, + e.virtual_size as umem, + name.as_str().into(), + ) + }) + }); + + section_call(iter, &mut callback, base); + + Ok(()) + } + Object::Mach(Mach::Binary(bin)) => { + let mut base_off = None; + + let iter = bin.segments.sections().flatten().filter_map(|v| { + let (s, _) = v.ok()?; + let name = &s.sectname; + let name = name.split(|&v| v == 0).next()?; + let name = std::str::from_utf8(name).ok()?; + + let addr = s.addr as umem; + + if base_off.is_none() { + base_off = Some(addr); + } + + Some((addr - base_off.unwrap(), s.size as umem, name.into())) + }); + + section_call(iter, &mut callback, base); + + Ok(()) + } + _ => Err(ErrorKind::InvalidExeFile.into()), + }); + + ret +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/args.rs b/apex_dma/memflow_lib/memflow/src/plugins/args.rs new file mode 100644 index 0000000..c4645f7 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/args.rs @@ -0,0 +1,728 @@ +/*! +Connector argument handler. 
+*/ + +use std::fmt; +use std::prelude::v1::*; + +use crate::error::{Error, ErrorKind, ErrorOrigin, Result}; + +use cglue::{repr_cstring::ReprCString, vec::CVec}; + +use core::convert::TryFrom; +use hashbrown::HashMap; + +/// Argument wrapper for connectors +/// +/// # Examples +/// +/// Construct from a string: +/// ``` +/// use memflow::plugins::Args; +/// use std::convert::TryFrom; +/// +/// let argstr = "opt1=test1,opt2=test2,opt3=test3"; +/// let args: Args = argstr.parse().unwrap(); +/// ``` +/// +/// Construct as builder: +/// ``` +/// use memflow::plugins::Args; +/// +/// let args = Args::new() +/// .insert("arg1", "test1") +/// .insert("arg2", "test2"); +/// ``` +#[repr(C)] +#[derive(Debug, Clone)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct Args { + // Just how many args do you have usually? + // Hashmap performance improvements may not be worth the complexity + // C/C++ users would have in constructing arguments structure. + args: CVec, +} + +#[derive(Debug, Clone)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ArgEntry { + key: ReprCString, + value: ReprCString, +} + +impl> From<(T, T)> for ArgEntry { + fn from((key, value): (T, T)) -> Self { + Self { + key: key.into(), + value: value.into(), + } + } +} + +impl fmt::Display for Args { + /// Generates a string of key-value pairs containing the underlying data of the Args. + /// + /// This function will produce a string that can be properly parsed by the `parse` function again. + /// + /// # Remarks + /// + /// The sorting order of the underlying `HashMap` is random. + /// This function only guarantees that the 'default' value (if it is set) will be the first element. 
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut result = Vec::new();
+
+        if let Some(default) = self.get_default() {
+            result.push(default.to_string());
+        }
+
+        result.extend(
+            self.args
+                .iter()
+                .filter(|e| &*e.key != "default")
+                .map(|ArgEntry { key, value }| {
+                    if value.contains(',') || value.contains('=') {
+                        format!("{}=\"{}\"", key, value)
+                    } else {
+                        format!("{}={}", key, value)
+                    }
+                })
+                .collect::<Vec<_>>(),
+        );
+
+        write!(f, "{}", result.join(","))
+    }
+}
+
+impl std::str::FromStr for Args {
+    type Err = crate::error::Error;
+
+    /// Tries to create an `Args` structure from an argument string.
+    ///
+    /// The argument string is a string of comma separated key-value pairs.
+    ///
+    /// An argument string can just contain keys and values:
+    /// `opt1=val1,opt2=val2,opt3=val3`
+    ///
+    /// The argument string can also contain a default value as the first entry
+    /// which will be placed as a default argument:
+    /// `default_value,opt1=val1,opt2=val2`
+    ///
+    /// This function can be used to initialize a connector from user input.
+    fn from_str(s: &str) -> Result<Self> {
+        let split = split_str_args(s, ',').collect::<Vec<_>>();
+
+        let mut map = HashMap::new();
+        for (i, kv) in split.iter().enumerate() {
+            let kvsplit = split_str_args(kv, '=').collect::<Vec<_>>();
+            if kvsplit.len() == 2 {
+                map.insert(kvsplit[0].to_string(), kvsplit[1].to_string());
+            } else if i == 0 && !kv.is_empty() {
+                map.insert("default".to_string(), kv.to_string());
+            }
+        }
+
+        Ok(Self {
+            args: map.into_iter().map(<_>::into).collect::<Vec<_>>().into(),
+        })
+    }
+}
+
+impl Default for Args {
+    /// Creates an empty `Args` struct.
+    fn default() -> Self {
+        Self {
+            args: Default::default(),
+        }
+    }
+}
+
+impl Args {
+    /// Creates an empty `Args` struct.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Creates an `Args` struct with a default (unnamed) value.
+    pub fn with_default(value: &str) -> Self {
+        Self::new().insert("default", value)
+    }
+
+    /// Consumes self, inserts the given key-value pair and returns the self again.
+    ///
+    /// This function can be used as a builder pattern when programmatically
+    /// configuring connectors.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use memflow::plugins::Args;
+    ///
+    /// let args = Args::new()
+    ///     .insert("arg1", "test1")
+    ///     .insert("arg2", "test2");
+    /// ```
+    pub fn insert(mut self, key: &str, value: &str) -> Self {
+        if let Some(a) = self.args.iter_mut().find(|a| &*a.key == key) {
+            a.value = value.into();
+        } else {
+            self.args.push((key, value).into());
+        }
+        self
+    }
+
+    /// Tries to retrieve an entry from the options map.
+    /// If the entry was not found this function returns a `None` value.
+    pub fn get(&self, key: &str) -> Option<&str> {
+        self.args
+            .iter()
+            .filter(|a| &*a.key == key)
+            .map(|a| &*a.value)
+            .next()
+    }
+
+    /// Tries to retrieve the default entry from the options map.
+    /// If the entry was not found this function returns a `None` value.
+    ///
+    /// This function is a convenience wrapper for `args.get("default")`.
+ pub fn get_default(&self) -> Option<&str> { + self.get("default") + } +} + +impl TryFrom<&str> for Args { + type Error = Error; + + fn try_from(args: &str) -> Result { + args.parse() + } +} + +impl TryFrom for Args { + type Error = Error; + + fn try_from(args: String) -> Result { + args.parse() + } +} + +impl From for String { + fn from(args: Args) -> Self { + args.to_string() + } +} + +/// Validator for connector arguments +/// +/// # Examples +/// +/// Builder: +/// ``` +/// use memflow::plugins::{ArgsValidator, ArgDescriptor}; +/// +/// let validator = ArgsValidator::new() +/// .arg(ArgDescriptor::new("default")) +/// .arg(ArgDescriptor::new("arg1")); +/// ``` +#[derive(Debug)] +pub struct ArgsValidator { + args: Vec, +} + +impl Default for ArgsValidator { + fn default() -> Self { + Self::new() + } +} + +impl ArgsValidator { + /// Creates an empty `ArgsValidator` struct. + pub fn new() -> Self { + Self { args: Vec::new() } + } + + /// Adds an `ArgDescriptor` to the validator and returns itself. 
+ pub fn arg(mut self, arg: ArgDescriptor) -> Self { + self.args.push(arg); + self + } + + pub fn validate(&self, args: &Args) -> Result<()> { + // check if all given args exist + for arg in args.args.iter() { + if !self.args.iter().any(|a| a.name == *arg.key) { + return Err(Error(ErrorOrigin::ArgsValidator, ErrorKind::ArgNotExists) + .log_error(format!("argument {} does not exist", &*arg.key))); + } + } + + for arg in self.args.iter() { + // check if required args are set + if arg.required && args.get(&arg.name).is_none() { + return Err( + Error(ErrorOrigin::ArgsValidator, ErrorKind::RequiredArgNotFound).log_error( + format!("argument {} is required but could not be found", arg.name), + ), + ); + } + + // check if validate matches + if let Some(validator) = &arg.validator { + if let Some(value) = args.get(&arg.name) { + if let Err(err) = validator(value) { + return Err(Error(ErrorOrigin::ArgsValidator, ErrorKind::ArgValidation) + .log_error(format!("argument {} is invalid: {}", arg.name, err))); + } + } + } + } + + Ok(()) + } +} + +impl fmt::Display for ArgsValidator { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (idx, arg) in self.args.iter().enumerate() { + if idx < self.args.len() - 1 { + writeln!(f, "{}", arg).ok(); + } else { + write!(f, "{}", arg).ok(); + } + } + Ok(()) + } +} + +pub type ArgValidator = Box ::std::result::Result<(), &'static str>>; + +/// Describes a single validator argument. +/// +/// # Examples +/// +/// Builder: +/// ``` +/// use memflow::plugins::ArgDescriptor; +/// +/// let desc = ArgDescriptor::new("cache_size") +/// .description("cache_size argument description") +/// .required(true); +/// ``` +pub struct ArgDescriptor { + pub name: String, + pub description: Option, + pub required: bool, + pub validator: Option, +} + +impl ArgDescriptor { + /// Creates a new `ArgDescriptor` with the given argument name. 
+ pub fn new(name: &str) -> Self { + Self { + name: name.to_owned(), + description: None, + required: false, + validator: None, + } + } + + /// Set the description for this argument. + /// + /// By default the description is `None`. + pub fn description(mut self, description: &str) -> Self { + self.description = Some(description.to_owned()); + self + } + + /// Set the required state for this argument. + /// + /// By default arguments are optional. + pub fn required(mut self, required: bool) -> Self { + self.required = required; + self + } + + /// Sets the validator function for this argument. + /// + /// By default no validator is set. + /// + /// # Examples + /// + /// ``` + /// use memflow::plugins::ArgDescriptor; + /// + /// let desc = ArgDescriptor::new("cache_size").validator(Box::new(|arg| { + /// match arg == "valid_option" { + /// true => Ok(()), + /// false => Err("argument must be 'valid_option'"), + /// } + /// })); + /// ``` + pub fn validator(mut self, validator: ArgValidator) -> Self { + self.validator = Some(validator); + self + } +} + +impl fmt::Display for ArgDescriptor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}: {}{}", + self.name, + self.description + .as_ref() + .unwrap_or(&"no description available".to_owned()), + if self.required { " (required)" } else { "" }, + ) + } +} + +impl fmt::Debug for ArgDescriptor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}: {}{}", + self.name, + self.description + .as_ref() + .unwrap_or(&"no description available".to_owned()), + if self.required { " (required)" } else { "" }, + ) + } +} + +/// Split a string into a list of separate parts based on ':' delimiter +/// +/// This is a more advanced version of splitting that allows to do some basic escaping with +/// quotation marks. 
+/// +/// # Examples +/// +/// ``` +/// use memflow::plugins::args::split_str_args; +/// +/// let v: Vec<_> = split_str_args("a:b:c", ':').collect(); +/// assert_eq!(v, ["a", "b", "c"]); +/// +/// let v: Vec<_> = split_str_args("a::c", ':').collect(); +/// assert_eq!(v, ["a", "", "c"]); +/// +/// let v: Vec<_> = split_str_args("a:\"hello\":c", ':').collect(); +/// assert_eq!(v, ["a", "hello", "c"]); +/// +/// let v: Vec<_> = split_str_args("a:\"hel:lo\":c", ':').collect(); +/// assert_eq!(v, ["a", "hel:lo", "c"]); +/// +/// let v: Vec<_> = split_str_args("a:\"hel:lo:c", ':').collect(); +/// assert_eq!(v, ["a", "\"hel:lo:c"]); +/// +/// let v: Vec<_> = split_str_args("a:'hel\":lo\"':c", ':').collect(); +/// assert_eq!(v, ["a", "hel\":lo\"", "c"]); +/// +/// let v: Vec<_> = split_str_args("a:hel\":lo\":c", ':').collect(); +/// assert_eq!(v, ["a", "hel\":lo\"", "c"]); +/// ``` +pub fn split_str_args(inp: &str, split_char: char) -> impl Iterator { + let mut prev_char = '\0'; + let mut quotation_char = None; + + const VALID_QUOTES: &str = "\"'`"; + assert!(!VALID_QUOTES.contains(split_char)); + + inp.split(move |c| { + let mut ret = false; + + // found an unescaped quote + if VALID_QUOTES.contains(c) && prev_char != '\\' { + // scan string up until we find the same quotation char again + match quotation_char { + Some(qc) if qc == c => { + quotation_char = None; + } + None => quotation_char = Some(c), + _ => (), + } + } + + if quotation_char.is_none() && c == split_char { + ret = true; + } + + prev_char = c; + ret + }) + .map(|s| { + if let Some(c) = s.chars().next().and_then(|a| { + if s.ends_with(a) && VALID_QUOTES.contains(a) { + Some(a) + } else { + None + } + }) { + s.split_once(c) + .and_then(|(_, a)| a.rsplit_once(c)) + .map(|(a, _)| a) + .unwrap_or("") + } else { + s + } + }) +} + +pub fn parse_vatcache(args: &Args) -> Result> { + match args.get("vatcache").unwrap_or("default") { + "default" => Ok(Some((0, 0))), + "none" => Ok(None), + size => 
Ok(Some(parse_vatcache_args(size)?)), + } +} + +fn parse_vatcache_args(vargs: &str) -> Result<(usize, u64)> { + let mut sp = vargs.splitn(2, ';'); + let (size, time) = ( + sp.next().ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse VAT size") + })?, + sp.next().unwrap_or("0"), + ); + let size = usize::from_str_radix(size, 16).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration).log_error("Failed to parse VAT size") + })?; + let time = time.parse::().map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse VAT validity time") + })?; + Ok((size, time)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn from_str() { + let argstr = "opt1=test1,opt2=test2,opt3=test3"; + let args: Args = argstr.parse().unwrap(); + assert_eq!(args.get("opt1").unwrap(), "test1"); + assert_eq!(args.get("opt2").unwrap(), "test2"); + assert_eq!(args.get("opt3").unwrap(), "test3"); + } + + #[test] + pub fn from_str_default() { + let argstr = "test0,opt1=test1,opt2=test2,opt3=test3"; + let args: Args = argstr.parse().unwrap(); + assert_eq!(args.get_default().unwrap(), "test0"); + assert_eq!(args.get("opt1").unwrap(), "test1"); + assert_eq!(args.get("opt2").unwrap(), "test2"); + assert_eq!(args.get("opt3").unwrap(), "test3"); + } + + #[test] + pub fn from_str_default2() { + let argstr = "opt1=test1,test0"; + let args: Args = argstr.parse().unwrap(); + assert_eq!(args.get_default(), None); + assert_eq!(args.get("opt1").unwrap(), "test1"); + } + + #[test] + pub fn builder() { + let args = Args::new().insert("arg1", "test1").insert("arg2", "test2"); + assert_eq!(args.get("arg1").unwrap(), "test1"); + assert_eq!(args.get("arg2").unwrap(), "test2"); + } + + #[test] + pub fn parse_empty() { + let argstr = "opt1=test1,test0"; + let _: Args = argstr.parse().unwrap(); + } + + #[test] + pub fn to_string() { + let argstr = "opt1=test1,opt2=test2,opt3=test3"; + let args: Args = 
argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get_default(), None);
        assert_eq!(args2.get("opt1").unwrap(), "test1");
        assert_eq!(args2.get("opt2").unwrap(), "test2");
        assert_eq!(args2.get("opt3").unwrap(), "test3");
    }

    // Round-trip: formatting and re-parsing must preserve all entries,
    // including the default (unnamed) value.
    #[test]
    pub fn to_string_with_default() {
        let argstr = "test0,opt1=test1,opt2=test2,opt3=test3";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get_default().unwrap(), "test0");
        assert_eq!(args2.get("opt1").unwrap(), "test1");
        assert_eq!(args2.get("opt2").unwrap(), "test2");
        assert_eq!(args2.get("opt3").unwrap(), "test3");
    }

    // Quoted values may contain the ',' separator without being split.
    #[test]
    pub fn double_quotes() {
        let argstr = "opt1=test1,test0,opt2=\"test2,test3\"";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get("opt1").unwrap(), "test1");
        assert_eq!(args2.get("opt2").unwrap(), "test2,test3");
    }

    // Quoted values may also contain '=' without being treated as a pair.
    #[test]
    pub fn double_quotes_eq() {
        let argstr = "opt1=test1,test0,opt2=\"test2,test3=test4\"";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get("opt1").unwrap(), "test1");
        assert_eq!(args2.get("opt2").unwrap(), "test2,test3=test4");
    }

    #[test]
    pub fn slashes() {
        let argstr = "device=vmware://,remote=rpc://insecure:computername.local";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get("device").unwrap(), "vmware://");
        assert_eq!(
            args2.get("remote").unwrap(),
            "rpc://insecure:computername.local"
        );
    }

    #[test]
    pub fn slashes_quotes_split() {
        let v: Vec<_> = split_str_args(
            "url1=\"uri://ip=test:test@test,test\",url2=\"test:test@test.de,test2:test2@test2.de\"",
            ',',
        )
        .collect();
        assert_eq!(
            v,
            [
                "url1=\"uri://ip=test:test@test,test\"",
                "url2=\"test:test@test.de,test2:test2@test2.de\""
            ]
        );
    }

    #[test]
    pub fn slashes_quotes() {
        let argstr = "device=\"RAWUDP://ip=127.0.0.1\"";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get("device").unwrap(), "RAWUDP://ip=127.0.0.1");
    }

    // Backtick quotes parse like double quotes, but serialization
    // normalizes them to double quotes.
    #[test]
    pub fn slashes_mixed_quotes() {
        let argstr = "device=`RAWUDP://ip=127.0.0.1`";
        let args: Args = argstr.parse().unwrap();
        assert_eq!(args.get("device").unwrap(), "RAWUDP://ip=127.0.0.1");

        let arg2str = args.to_string();
        assert_eq!(arg2str, "device=\"RAWUDP://ip=127.0.0.1\"");

        let args2: Args = arg2str.parse().unwrap();
        assert_eq!(args2.get("device").unwrap(), "RAWUDP://ip=127.0.0.1");
    }

    #[test]
    pub fn slashes_quotes_complex() {
        let argstr =
            "url1=\"uri://ip=test:test@test,test\",url2=\"test:test@test.de,test2:test2@test2.de\"";
        let args: Args = argstr.parse().unwrap();
        let args2: Args = args.to_string().parse().unwrap();
        assert_eq!(args2.get("url1").unwrap(), "uri://ip=test:test@test,test");
        assert_eq!(
            args2.get("url2").unwrap(),
            "test:test@test.de,test2:test2@test2.de"
        );
    }

    #[test]
    pub fn validator_success() {
        let validator = ArgsValidator::new()
            .arg(ArgDescriptor::new("default"))
            .arg(ArgDescriptor::new("opt1"));

        let argstr = "test0,opt1=test1";
        let args: Args = argstr.parse().unwrap();

        assert_eq!(validator.validate(&args), Ok(()));
    }

    #[test]
    pub fn validator_success_optional() {
        let validator = ArgsValidator::new().arg(ArgDescriptor::new("opt1").required(false));

        let args: Args = "".parse().unwrap();

        assert_eq!(validator.validate(&args), Ok(()));
    }

    #[test]
    pub fn validator_error_required() {
        let validator = ArgsValidator::new().arg(ArgDescriptor::new("opt1").required(true));

        let args: Args = "".parse().unwrap();

        assert_eq!(
            validator.validate(&args),
            Err(Error(
                ErrorOrigin::ArgsValidator,
                ErrorKind::RequiredArgNotFound
            ))
        );
    }

    #[test]
    pub fn
validator_error_notexist() { + let validator = ArgsValidator::new().arg(ArgDescriptor::new("opt1")); + + let argstr = "opt2=arg2"; + let args: Args = argstr.parse().unwrap(); + + assert_eq!( + validator.validate(&args), + Err(Error(ErrorOrigin::ArgsValidator, ErrorKind::ArgNotExists)) + ); + } + + #[test] + pub fn validator_validate_success() { + let validator = + ArgsValidator::new().arg(ArgDescriptor::new("default").validator(Box::new(|arg| { + match arg == "valid_option" { + true => Ok(()), + false => Err("argument must be 'valid_option'"), + } + }))); + + let argstr = "default=valid_option"; + let args: Args = argstr.parse().unwrap(); + + assert_eq!(validator.validate(&args), Ok(())); + } + + #[test] + pub fn validator_validate_fail() { + let validator = + ArgsValidator::new().arg(ArgDescriptor::new("default").validator(Box::new(|arg| { + match arg == "valid_option" { + true => Ok(()), + false => Err("argument must be 'valid_option'"), + } + }))); + + let argstr = "invalid_option"; + let args: Args = argstr.parse().unwrap(); + + assert_eq!( + validator.validate(&args), + Err(Error(ErrorOrigin::ArgsValidator, ErrorKind::ArgValidation)) + ); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/connector.rs b/apex_dma/memflow_lib/memflow/src/plugins/connector.rs new file mode 100644 index 0000000..48ac3c5 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/connector.rs @@ -0,0 +1,406 @@ +use ::log::info; +use ::std::time::Duration; + +use crate::cglue::{ + result::{from_int_result, from_int_result_empty}, + *, +}; +use crate::error::*; +use crate::mem::phys_mem::*; +use crate::types::{cache::TimedCacheValidator, size}; + +use super::{ + args::split_str_args, Args, LibArc, LibContext, Loadable, OsInstanceArcBox, PluginDescriptor, + TargetInfo, +}; + +use crate::connector::cpu_state::*; +use cglue::trait_group::c_void; + +cglue_trait_group!(ConnectorInstance, { PhysicalMemory, Clone }, { ConnectorCpuState }); +pub type MuConnectorInstanceArcBox<'a> 
= std::mem::MaybeUninit>; + +/// This creates a cglue plugin instance from the given [`PhysicalMemory`] object. +/// This also configures caching based on the provided input `args`. +pub fn create_instance( + conn: T, + lib: LibArc, + args: &ConnectorArgs, + no_default_cache: bool, +) -> ConnectorInstanceArcBox<'static> +// TODO: get rid of these trait bounds +where + (T, LibArc): Into>, + ( + CachedPhysicalMemory<'static, T, TimedCacheValidator>, + LibArc, + ): Into< + ConnectorInstanceBaseArcBox< + 'static, + CachedPhysicalMemory<'static, T, TimedCacheValidator>, + c_void, + >, + >, +{ + // check if user explicitly enabled caching or alternatively fall back to auto configuration of the connector + let use_cache = Option::::from(args.middleware_args.cache).unwrap_or(!no_default_cache); + let conn = if use_cache { + let cache_page_size = if args.middleware_args.cache_page_size > 0 { + args.middleware_args.cache_page_size + } else { + size::kb(4) + }; + + info!("Inserting `CachedPhysicalMemory` middleware with size={}, validity_time={}, page_size={}", + args.middleware_args.cache_size, args.middleware_args.cache_validity_time, cache_page_size); + + let mut builder = CachedPhysicalMemory::builder(conn).page_size(cache_page_size); + + if args.middleware_args.cache_size > 0 { + builder = builder.cache_size(args.middleware_args.cache_size); + } + + if args.middleware_args.cache_validity_time > 0 { + builder = builder.validator(TimedCacheValidator::new( + Duration::from_millis(args.middleware_args.cache_validity_time).into(), + )) + } + + let conn = builder.build().unwrap(); + group_obj!((conn, lib.clone()) as ConnectorInstance) + } else { + group_obj!((conn, lib.clone()) as ConnectorInstance) + }; + + let conn = if args.middleware_args.delay > 0 { + info!( + "Inserting `DelayedPhysicalMemory` middleware with delay={}", + args.middleware_args.delay + ); + + let conn = DelayedPhysicalMemory::builder(conn) + .delay(Duration::from_micros(args.middleware_args.delay)) + 
.build() + .unwrap(); + group_obj!((conn, lib.clone()) as ConnectorInstance) + } else { + conn + }; + + if args.middleware_args.metrics { + info!("Inserting `PhysicalMemoryMetrics` middleware",); + let conn = PhysicalMemoryMetrics::new(conn); + group_obj!((conn, lib) as ConnectorInstance) + } else { + conn + } + + // TODO: optional features not forwarded? +} + +#[repr(C)] +#[derive(Default, Clone, Copy)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ConnectorMiddlewareArgs { + pub cache: COption, + pub cache_size: usize, + pub cache_validity_time: u64, + pub cache_page_size: usize, + + pub delay: u64, + + pub metrics: bool, +} + +impl ConnectorMiddlewareArgs { + pub fn new() -> Self { + Self::default() + } + + pub fn cache(mut self, cache: bool) -> Self { + self.cache = COption::Some(cache); + self + } + pub fn cache_size(mut self, size: usize) -> Self { + self.cache_size = size; + self + } + pub fn cache_validity_time(mut self, validity_time: u64) -> Self { + self.cache_validity_time = validity_time; + self + } + pub fn cache_page_size(mut self, page_size: usize) -> Self { + self.cache_page_size = page_size; + self + } + + pub fn delay(mut self, delay: u64) -> Self { + self.delay = delay; + self + } + + pub fn metrics(mut self, metrics: bool) -> Self { + self.metrics = metrics; + self + } +} + +impl std::str::FromStr for ConnectorMiddlewareArgs { + type Err = crate::error::Error; + + fn from_str(vargs: &str) -> Result { + let args: Args = vargs.parse()?; + + let (cache, size, time, page_size) = ( + args.get("cache") + .map(|s| s.to_lowercase() == "true" || s == "1"), + args.get("cache_size").unwrap_or("0kb"), + args.get("cache_time").unwrap_or("0"), + args.get("cache_page_size").unwrap_or("0"), + ); + + let (size, size_mul) = { + let mul_arr = &[ + (size::kb(1), ["kb", "k"]), + (size::mb(1), ["mb", "m"]), + (size::gb(1), ["gb", "g"]), + ]; + + mul_arr + .iter() + .flat_map(|(m, e)| e.iter().map(move |e| (*m, e))) + 
.find_map(|(m, e)| { + if size.to_lowercase().ends_with(e) { + Some((size.trim_end_matches(e), m)) + } else { + None + } + }) + .ok_or_else(|| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Invalid Page Cache size unit (or none)!") + })? + }; + + let size = usize::from_str_radix(size, 16).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse Page Cache size") + })?; + + let cache_size = size * size_mul; + + let cache_validity_time = time.parse::().map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse Page Cache validity time") + })?; + + let cache_page_size = usize::from_str_radix(page_size, 16).map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse Page size for an entry") + })?; + + let delay = args + .get("delay") + .unwrap_or("0") + .parse::() + .map_err(|_| { + Error(ErrorOrigin::OsLayer, ErrorKind::Configuration) + .log_error("Failed to parse delay configuration") + })?; + + let metrics = args + .get("metrics") + .map(|s| s.to_lowercase() == "true" || s == "1") + .unwrap_or_default(); + + Ok(Self { + cache: cache.into(), + cache_size, + cache_validity_time, + cache_page_size, + + delay, + + metrics, + }) + } +} + +#[repr(C)] +#[derive(Default, Clone)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ConnectorArgs { + pub target: Option, + pub extra_args: Args, + pub middleware_args: ConnectorMiddlewareArgs, +} + +impl std::str::FromStr for ConnectorArgs { + type Err = crate::error::Error; + + fn from_str(s: &str) -> Result { + let mut iter = split_str_args(s, ':'); + + let target = iter + .next() + .and_then(|s| if s.is_empty() { None } else { Some(s.into()) }); + + let extra_args = iter.next().unwrap_or("").parse()?; + + let middleware_args = if let Some(s) = iter.next() { + // allow user to see the parse error + s.parse()? 
+ } else { + ConnectorMiddlewareArgs::default() + }; + + Ok(Self { + target, + extra_args, + middleware_args, + }) + } +} + +impl ConnectorArgs { + pub fn new( + target: Option<&str>, + extra_args: Args, + middleware_args: Option, + ) -> Self { + Self { + target: target.map(<_>::into), + extra_args, + middleware_args: middleware_args.unwrap_or_default(), + } + } +} + +pub type ConnectorDescriptor = PluginDescriptor; + +pub struct LoadableConnector { + descriptor: PluginDescriptor, +} + +impl Loadable for LoadableConnector { + type Instance = ConnectorInstanceArcBox<'static>; + type InputArg = Option>; + type CInputArg = COption>; + type ArgsType = ConnectorArgs; + + fn ident(&self) -> &str { + unsafe { self.descriptor.name.into_str() } + } + + fn export_prefix() -> &'static str { + "MEMFLOW_CONNECTOR_" + } + + fn plugin_type() -> &'static str { + "Connector" + } + + fn new(descriptor: PluginDescriptor) -> Self { + Self { descriptor } + } + + /// Retrieves the help text for this plugin + fn help(&self) -> Result { + match self.descriptor.help_callback { + Some(help_callback) => { + let mut ret = vec![]; + (help_callback)((&mut ret).into()); + ret.first().map(|h| h.to_string()).ok_or_else(|| { + Error(ErrorOrigin::Connector, ErrorKind::NotSupported).log_error(format!( + "Connector `{}` did not return any help text.", + self.ident() + )) + }) + } + None => Err( + Error(ErrorOrigin::Connector, ErrorKind::NotSupported).log_error(format!( + "Connector `{}` does not support help text.", + self.ident() + )), + ), + } + } + + /// Retrieves the list of available targets for this plugin + fn target_list(&self) -> Result> { + match self.descriptor.target_list_callback { + Some(target_list_callback) => { + let mut ret = vec![]; + from_int_result_empty::((target_list_callback)((&mut ret).into()))?; + Ok(ret) + } + None => Err( + Error(ErrorOrigin::Connector, ErrorKind::NotSupported).log_error(format!( + "Connector `{}` does not support target listing.", + self.ident() + )), + ), 
+ } + } + + /// Creates a new connector instance from this library. + /// + /// The connector is initialized with the arguments provided to this function. + fn instantiate( + &self, + library: CArc, + input: Self::InputArg, + args: Option<&ConnectorArgs>, + ) -> Result { + let mut out = MuConnectorInstanceArcBox::uninit(); + let logger = library.as_ref().map(|lib| unsafe { lib.get_logger() }); + let res = + (self.descriptor.create)(args, input.into(), library.into_opaque(), logger, &mut out); + unsafe { from_int_result(res, out) } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn connector_args_parse() { + let args: ConnectorArgs = + "target:extra=value:cache_size=1kb,cache_time=10,cache_page_size=1000" + .parse() + .expect("unable to parse args"); + assert_eq!(args.target.unwrap(), ReprCString::from("target")); + assert_eq!(args.extra_args.get("extra").unwrap(), "value"); + assert_eq!(Option::::from(args.middleware_args.cache), None); + assert_eq!(args.middleware_args.cache_size, 1024); + assert_eq!(args.middleware_args.cache_validity_time, 10); + assert_eq!(args.middleware_args.cache_page_size, 0x1000); + } + + #[test] + pub fn connector_args_with_cache() { + let args: ConnectorArgs = + "target:extra=value:cache=true,cache_size=1kb,cache_time=10,cache_page_size=1000" + .parse() + .expect("unable to parse args"); + assert_eq!(args.target.unwrap(), ReprCString::from("target")); + assert_eq!(args.extra_args.get("extra").unwrap(), "value"); + assert_eq!(Option::::from(args.middleware_args.cache), Some(true)); + assert_eq!(args.middleware_args.cache_size, 1024); + assert_eq!(args.middleware_args.cache_validity_time, 10); + assert_eq!(args.middleware_args.cache_page_size, 0x1000); + } + + #[test] + pub fn connector_args_url() { + let args: ConnectorArgs = ":device=\"RAWUDP://ip=127.0.0.1:8080\":" + .parse() + .expect("unable to parse args"); + assert_eq!(args.target, None); + assert_eq!( + args.extra_args.get("device").unwrap(), + 
"RAWUDP://ip=127.0.0.1:8080" + ); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/logger.rs b/apex_dma/memflow_lib/memflow/src/plugins/logger.rs new file mode 100644 index 0000000..3176382 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/logger.rs @@ -0,0 +1,173 @@ +/// The plugin logger is just a thin wrapper which redirects all +/// logging functions from the callee to the caller +use crate::cglue::{ + ext::{DisplayBaseRef, DisplayRef}, + COption, CSliceRef, Opaquable, +}; + +use log::{Level, LevelFilter, SetLoggerError}; + +use core::ffi::c_void; + +use std::sync::atomic::{AtomicPtr, Ordering}; + +/// FFI-Safe representation of log::Metadata +#[repr(C)] +pub struct Metadata<'a> { + level: Level, + target: CSliceRef<'a, u8>, +} + +/// FFI-Safe representation of log::Record +#[repr(C)] +pub struct Record<'a> { + metadata: Metadata<'a>, + message: DisplayRef<'a>, + module_path: COption>, + file: COption>, + line: COption, + //#[cfg(feature = "kv_unstable")] + //key_values: KeyValues<'a>, +} + +/// A logger which just forwards all logging calls over the FFI +/// from the callee to the caller (i.e. from the plugin to the main process). +#[repr(C)] +pub struct PluginLogger { + max_level: LevelFilter, + enabled: extern "C" fn(metadata: &Metadata) -> bool, + log: extern "C" fn(record: &Record) -> (), + flush: extern "C" fn() -> (), + on_level_change: AtomicPtr, +} + +impl PluginLogger { + /// Creates a new PluginLogger. + /// + /// # Remarks: + /// + /// This function has to be called on the caller side + /// (i.e. from memflow itself in the main process). + pub fn new() -> Self { + Self { + max_level: log::max_level(), + enabled: mf_log_enabled, + log: mf_log_log, + flush: mf_log_flush, + on_level_change: AtomicPtr::new(std::ptr::null_mut()), + } + } + + /// Initializes the logger and sets up the logger in the log crate. + /// + /// # Remarks: + /// + /// This function has to be invoked on the callee side. + /// (i.e. 
in the plugin) + pub fn init(&'static self) -> Result<(), SetLoggerError> { + // Explicitly typecheck the signature so that we do not mess anything up + let val: SetMaxLevelFn = mf_log_set_max_level; + self.on_level_change + .store(val as *const c_void as *mut c_void, Ordering::SeqCst); + log::set_max_level(self.max_level); + log::set_logger(self)?; + Ok(()) + } + + /// Updates the log level on the plugin end from local end + pub fn on_level_change(&self, new_level: LevelFilter) { + let val = self.on_level_change.load(Ordering::Relaxed); + if let Some(on_change) = unsafe { std::mem::transmute::<_, Option>(val) } { + on_change(new_level); + } + } +} + +impl Default for PluginLogger { + fn default() -> Self { + PluginLogger::new() + } +} + +fn display_obj<'a, T: 'a + core::fmt::Display>(obj: &'a T) -> DisplayRef<'a> { + let obj: DisplayBaseRef = From::from(obj); + obj.into_opaque() +} + +impl log::Log for PluginLogger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + let m = Metadata { + level: metadata.level(), + target: metadata.target().into(), + }; + (self.enabled)(&m) + } + + fn log(&self, record: &log::Record) { + let message = display_obj(record.args()); + let r = Record { + metadata: Metadata { + level: record.metadata().level(), + target: record.metadata().target().into(), + }, + message, + module_path: record.module_path().map(|s| s.into()).into(), + file: record.file().map(|s| s.into()).into(), + line: record.line().into(), + }; + (self.log)(&r) + } + + fn flush(&self) { + (self.flush)() + } +} + +type SetMaxLevelFn = extern "C" fn(LevelFilter); + +/// FFI function which is being invoked from the main executable to the plugin library. +extern "C" fn mf_log_set_max_level(level: LevelFilter) { + log::set_max_level(level); +} + +/// FFI function which is being invoked from the plugin library to the main executable. 
+extern "C" fn mf_log_enabled(metadata: &Metadata) -> bool { + log::logger().enabled( + &log::Metadata::builder() + .level(metadata.level) + .target(unsafe { metadata.target.into_str() }) + .build(), + ) +} + +/// FFI function which is being invoked from the plugin library to the main executable. +extern "C" fn mf_log_log(record: &Record) { + log::logger().log( + &log::Record::builder() + .metadata( + log::Metadata::builder() + .level(record.metadata.level) + .target(unsafe { record.metadata.target.into_str() }) + .build(), + ) + .args(format_args!("{}", record.message)) + .module_path(match &record.module_path { + COption::Some(s) => Some(unsafe { s.into_str() }), + COption::None => None, + }) + .file(match &record.file { + COption::Some(s) => Some(unsafe { s.into_str() }), + COption::None => None, + }) + .line(match &record.line { + COption::Some(l) => Some(*l), + COption::None => None, + }) + .build(), + ) +} + +/// FFI function which is being invoked from the plugin library to the main executable. +extern "C" fn mf_log_flush() { + log::logger().flush() +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/mod.rs b/apex_dma/memflow_lib/memflow/src/plugins/mod.rs new file mode 100644 index 0000000..a0a7ee5 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/mod.rs @@ -0,0 +1,1215 @@ +/*! +This module contains functions related to the Inventory system for Connectors and Os-Plugins. + +All functionality in this module is gated behind `plugins` feature. 
+*/ + +use crate::cglue::*; +use cglue::trait_group::c_void; +use core::convert::{TryFrom, TryInto}; +use std::prelude::v1::*; + +pub mod args; +#[doc(hidden)] +pub use args::{ArgDescriptor, Args, ArgsValidator}; + +// cbindgen fails to properly parse this as return type +pub type OptionVoid = Option<&'static mut c_void>; + +pub type LibArc = CArc; + +pub mod connector; +pub use connector::{ + cglue_connectorinstance::*, ConnectorArgs, ConnectorDescriptor, ConnectorMiddlewareArgs, + LoadableConnector, +}; +pub type ConnectorInputArg = ::InputArg; + +pub mod os; +pub use os::{ + cglue_intoprocessinstance::*, cglue_osinstance::*, cglue_processinstance::*, + IntoProcessInstanceArcBox, LoadableOs, MuOsInstanceArcBox, OsArgs, OsDescriptor, + OsInstanceArcBox, ProcessInstanceArcBox, +}; +pub type OsInputArg = ::InputArg; + +pub mod logger; +pub use logger::*; // TODO: restrict + +pub(crate) mod util; +pub use util::{wrap, wrap_with_input}; + +use crate::error::{Result, *}; + +use log::{debug, error, info, warn, LevelFilter}; +use std::fs::read_dir; +use std::mem::MaybeUninit; +use std::path::{Path, PathBuf}; + +use abi_stable::{type_layout::TypeLayout, StableAbi}; +use libloading::Library; +use once_cell::sync::OnceCell; + +/// Exported memflow plugins version +pub const MEMFLOW_PLUGIN_VERSION: i32 = 1; + +/// Help and Target callbacks +pub type HelpCallback<'a> = OpaqueCallback<'a, ReprCString>; + +/// Context for a single library. +pub struct LibContext { + lib: Library, + logger: OnceCell>, +} + +impl From for LibContext { + fn from(lib: Library) -> Self { + Self { + lib, + logger: Default::default(), + } + } +} + +impl LibContext { + /// Get a static logger for this library context. + /// + /// # Safety + /// + /// The returned logger is not actually static. Caller must ensure the reference won't dangle + /// after the library is unloaded. This is typically ensured by only passing this reference to + /// the underlying library code. 
+ pub unsafe fn get_logger(&self) -> &'static PluginLogger { + (&**self.logger.get_or_init(|| Box::new(PluginLogger::new())) as *const PluginLogger) + .as_ref() + .unwrap() + } + + pub fn try_get_logger(&self) -> Option<&PluginLogger> { + self.logger.get().map(|l| &**l) + } +} + +/// Target information structure +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct TargetInfo { + /// Name of the target + pub name: ReprCString, +} + +pub type TargetCallback<'a> = OpaqueCallback<'a, TargetInfo>; + +#[repr(C)] +pub struct PluginDescriptor { + /// The plugin api version for when the plugin was built. + /// This has to be set to `MEMFLOW_PLUGIN_VERSION` of memflow. + /// + /// If the versions mismatch the inventory will refuse to load. + pub plugin_version: i32, + + /// Does the plugin accept an input parameter? + pub accept_input: bool, + + /// Layout of the input type. + pub input_layout: &'static TypeLayout, + + /// Layout of the loaded type. + pub output_layout: &'static TypeLayout, + + /// The name of the plugin. + /// This name will be used when loading a plugin from the inventory. + /// + /// During plugin discovery, the export suffix has to match this name being capitalized + pub name: CSliceRef<'static, u8>, + + /// The version of the connector. + /// If multiple connectors are installed the latest is picked. + pub version: CSliceRef<'static, u8>, + + /// The description of the connector. + pub description: CSliceRef<'static, u8>, + + /// Retrieves a help string from the plugin (lists all available commands) + pub help_callback: Option ()>, + + /// Retrieves a list of available targets for the plugin + pub target_list_callback: Option i32>, + + /// Create instance of the plugin + pub create: CreateFn, +} + +// This warning is misleading here. `Loadable::ArgsType` isn't constrained to be `#[repr(C)]` here +// but both `ConnectorArgs` and `OsArgs` that use it are marked as `#[repr(C)]`. 
+#[allow(improper_ctypes_definitions)] +pub type CreateFn = extern "C" fn( + Option<&::ArgsType>, + ::CInputArg, + lib: LibArc, + logger: Option<&'static PluginLogger>, + &mut MaybeUninit<::Instance>, +) -> i32; + +/// Defines a common interface for loadable plugins +pub trait Loadable: Sized { + type Instance: StableAbi; + type InputArg; + type CInputArg: StableAbi; + type ArgsType; + + /// Checks if plugin with the same `ident` already exists in input list + fn exists(&self, instances: &[LibInstance]) -> bool { + instances + .iter() + .filter_map(|i| i.state.as_option()) + .any(|(_, l)| l.ident() == self.ident()) + } + + /// Identifier string of the plugin + fn ident(&self) -> &str; + + fn plugin_type() -> &'static str; + + /// Constant prefix for the plugin type + fn export_prefix() -> &'static str; + + fn new(descriptor: PluginDescriptor) -> Self; + + fn load( + path: impl AsRef, + library: &CArc, + export: &str, + ) -> Result> { + // find os descriptor + let descriptor = unsafe { + library + .as_ref() + // TODO: support loading without arc + .ok_or(Error(ErrorOrigin::Inventory, ErrorKind::Uninitialized))? + .lib + .get::<*mut PluginDescriptor>(format!("{}\0", export).as_bytes()) + .map_err(|_| Error(ErrorOrigin::Inventory, ErrorKind::MemflowExportsNotFound))? + .read() + }; + + // check version + if descriptor.plugin_version != MEMFLOW_PLUGIN_VERSION { + warn!( + "{} has a different version. 
version {} required, found {}.", + export, MEMFLOW_PLUGIN_VERSION, descriptor.plugin_version + ); + Ok(LibInstance { + path: path.as_ref().to_path_buf(), + state: LibInstanceState::VersionMismatch, + }) + } else if VerifyLayout::check::(Some(descriptor.input_layout)) + .and(VerifyLayout::check::(Some( + descriptor.output_layout, + ))) + .is_valid_strict() + { + Ok(LibInstance { + path: path.as_ref().to_path_buf(), + state: LibInstanceState::Loaded { + library: library.clone(), + loader: Self::new(descriptor), + }, + }) + } else { + warn!("{} has invalid ABI.", export); + Ok(LibInstance { + path: path.as_ref().to_path_buf(), + state: LibInstanceState::InvalidAbi, + }) + } + } + + /// Try to load a plugin library + /// + /// This function will access `library` and try to find corresponding entry for the plugin. If + /// a valid plugins are found, `Ok(LibInstance)` is returned. Otherwise, `Err(Error)` is + /// returned, with appropriate error. + /// + /// # Safety + /// + /// Loading third party libraries is inherently unsafe and the compiler + /// cannot guarantee that the implementation of the library + /// matches the one specified here. This is especially true if + /// the loaded library implements the necessary interface manually. + /// + /// It is adviced to use a provided proc macro to define a valid library. 
+ fn load_all(path: impl AsRef) -> Result>> { + let exports = util::find_export_by_prefix(path.as_ref(), Self::export_prefix())?; + if exports.is_empty() { + return Err(Error( + ErrorOrigin::Inventory, + ErrorKind::MemflowExportsNotFound, + )); + } + + // load library + let library = unsafe { Library::new(path.as_ref()) } + .map_err(|err| { + debug!( + "found {:?} in library '{:?}' but could not load it: {}", + exports, + path.as_ref(), + err + ); + Error(ErrorOrigin::Inventory, ErrorKind::UnableToLoadLibrary) + }) + .map(LibContext::from) + .map(CArc::from)?; + + Ok(exports + .into_iter() + .filter_map(|e| Self::load(path.as_ref(), &library, &e).ok()) + .collect()) + } + + /// Helper function to load a plugin into a list of library instances + /// + /// This function will try finding appropriate plugin entry, and add it into the list if there + /// isn't a duplicate entry. + /// + /// # Safety + /// + /// Loading third party libraries is inherently unsafe and the compiler + /// cannot guarantee that the implementation of the library matches the one + /// specified here. 
+ fn load_append(path: impl AsRef, out: &mut Vec>) -> Result<()> { + // try to get the canonical path + let canonical_path = + std::fs::canonicalize(path.as_ref()).unwrap_or_else(|_| path.as_ref().to_owned()); + + let libs = Self::load_all(path.as_ref())?; + for lib in libs.into_iter() { + // check if the canonical path was already added + if !out.iter().any(|o| o.path == canonical_path) { + if let LibInstanceState::Loaded { library: _, loader } = &lib.state { + // check if the ident already exists + if !loader.exists(out) { + info!( + "adding plugin '{}/{}': {:?}", + Self::plugin_type(), + loader.ident(), + path.as_ref() + ); + out.push(lib); + } else { + debug!( + "skipping library '{}' because it was added already: {:?}", + loader.ident(), + path.as_ref() + ); + return Err(Error(ErrorOrigin::Inventory, ErrorKind::AlreadyExists)); + } + } else { + out.push(lib); + } + } else { + debug!( + "skipping library at '{:?}' because it was added already", + path.as_ref() + ); + return Err(Error(ErrorOrigin::Inventory, ErrorKind::AlreadyExists)); + } + } + + Ok(()) + } + + /// Retrieves the help text for this plugin + fn help(&self) -> Result; + + /// Retrieves the list of available targets for this plugin + fn target_list(&self) -> Result>; + + /// Creates an `Instance` of the library + /// + /// This function assumes that `load` performed necessary safety checks + /// for validity of the library. + fn instantiate( + &self, + library: CArc, + input: Self::InputArg, + args: Option<&Self::ArgsType>, + ) -> Result; +} + +/// The core of the plugin system +/// +/// It scans system directories and collects valid memflow plugins. They can then be instantiated +/// easily. 
The reason the libraries are collected is to allow for reuse, and save performance +/// +/// # Examples +/// +/// Creating a OS instance, the recommended way: +/// +/// ```no_run +/// use memflow::plugins::Inventory; +/// # use memflow::plugins::OsInstanceArcBox; +/// # use memflow::error::Result; +/// # fn test() -> Result> { +/// let inventory = Inventory::scan(); +/// inventory +/// .builder() +/// .connector("qemu") +/// .os("win32") +/// .build() +/// # } +/// # test().ok(); +/// ``` +/// +/// Nesting connectors and os plugins: +/// ```no_run +/// use memflow::plugins::{Inventory, Args}; +/// # use memflow::error::Result; +/// # fn test() -> Result<()> { +/// let inventory = Inventory::scan(); +/// let os = inventory +/// .builder() +/// .connector("qemu") +/// .os("linux") +/// .connector("qemu") +/// .os("win32") +/// .build(); +/// # Ok(()) +/// # } +/// # test().ok(); +/// ``` +pub struct Inventory { + connectors: Vec>, + os_layers: Vec>, +} + +impl Inventory { + /// Creates a new inventory of plugins from the provided path. + /// The path has to be a valid directory or the function will fail with an `Error::IO` error. + /// + /// # Examples + /// + /// Creating a inventory: + /// ``` + /// use memflow::plugins::Inventory; + /// + /// let inventory = Inventory::scan_path("./") + /// .unwrap(); + /// ``` + pub fn scan_path>(path: P) -> Result { + let mut dir = PathBuf::default(); + dir.push(path); + + let mut ret = Self { + connectors: vec![], + os_layers: vec![], + }; + ret.add_dir(dir)?; + Ok(ret) + } + + /// Creates a new inventory of plugins by searching various paths. + /// + /// It will query PATH, and an additional set of of directories (standard unix ones, if unix, + /// and "HOME/.local/lib" on all OSes) for "memflow" directory, and if there is one, then + /// search for libraries in there. 
+ /// + /// # Examples + /// + /// Creating an inventory: + /// ``` + /// use memflow::plugins::Inventory; + /// + /// let inventory = Inventory::scan(); + /// ``` + pub fn scan() -> Self { + // add default paths + #[cfg(unix)] + let extra_paths: Vec<&str> = vec![ + "/usr/lib", // deprecated + "/usr/local/lib", + ]; + #[cfg(not(unix))] + let extra_paths = if let Some(Some(programfiles)) = + std::env::var_os("PROGRAMFILES").map(|v| v.to_str().map(|s| s.to_owned())) + { + vec![programfiles] + } else { + vec![] + }; + + let path_iter = extra_paths.into_iter().map(PathBuf::from); + + // add environment variable MEMFLOW_PLUGIN_PATH + let path_var = std::env::var_os("MEMFLOW_PLUGIN_PATH"); + let path_iter = path_iter.chain( + path_var + .as_ref() + .map(std::env::split_paths) + .into_iter() + .flatten(), + ); + + // add user directory + #[cfg(unix)] + let path_iter = path_iter.chain(dirs::home_dir().map(|dir| dir.join(".local").join("lib"))); + + #[cfg(not(unix))] + let path_iter = path_iter.chain(dirs::document_dir()); + + let mut ret = Self { + connectors: vec![], + os_layers: vec![], + }; + + for mut path in path_iter { + path.push("memflow"); + ret.add_dir(path).ok(); + } + + // add $MEMFLOW_PLUGIN_PATH at compile time + if let Some(extra_plugin_paths) = option_env!("MEMFLOW_PLUGIN_PATH") { + for p in std::env::split_paths(extra_plugin_paths) { + ret.add_dir(p).ok(); + } + } + + // add current working directory + if let Ok(pwd) = std::env::current_dir() { + ret.add_dir(pwd).ok(); + } + + ret + } + + /// Adds a library directory to the inventory + /// + /// This function applies additional filter to only scan potentially wanted files + /// + /// # Safety + /// + /// Same as previous functions - compiler can not guarantee the safety of + /// third party library implementations. 
+ pub fn add_dir_filtered(&mut self, dir: PathBuf, filter: &str) -> Result<&mut Self> { + if !dir.is_dir() { + return Err(Error(ErrorOrigin::Inventory, ErrorKind::InvalidPath)); + } + + info!("scanning {:?} for libraries", dir,); + + for entry in + read_dir(dir).map_err(|_| Error(ErrorOrigin::Inventory, ErrorKind::UnableToReadDir))? + { + let entry = entry + .map_err(|_| Error(ErrorOrigin::Inventory, ErrorKind::UnableToReadDirEntry))?; + if let Some(true) = entry.file_name().to_str().map(|n| n.contains(filter)) { + self.load(entry.path()); + } + } + + Ok(self) + } + + /// Adds a library directory to the inventory + /// + /// # Safety + /// + /// Same as previous functions - compiler can not guarantee the safety of + /// third party library implementations. + pub fn add_dir(&mut self, dir: PathBuf) -> Result<&mut Self> { + self.add_dir_filtered(dir, "") + } + + /// Adds cargo workspace to the inventory + /// + /// This function is used behind the scenes by the documentation, however, is not particularly + /// useful for end users. + pub fn with_workspace(mut self) -> Result { + let paths = std::fs::read_dir("../target/").map_err(|_| ErrorKind::UnableToReadDir)?; + for path in paths { + match path.unwrap().file_name().to_str() { + Some("release") | Some("debug") | None => {} + Some(x) => { + self.add_dir_filtered(format!("../target/{}/release/deps", x).into(), "ffi") + .ok(); + self.add_dir_filtered(format!("../target/{}/debug/deps", x).into(), "ffi") + .ok(); + } + } + } + self.add_dir_filtered("../target/release/deps".into(), "ffi") + .ok(); + self.add_dir_filtered("../target/debug/deps".into(), "ffi") + .ok(); + Ok(self) + } + + /// Adds a single library to the inventory + /// + /// # Safety + /// + /// Same as previous functions - compiler can not guarantee the safety of + /// third party library implementations. 
+ pub fn load(&mut self, path: PathBuf) -> &mut Self { + Loadable::load_append(&path, &mut self.connectors).ok(); + Loadable::load_append(&path, &mut self.os_layers).ok(); + self + } + + /// Returns the names of all currently available connectors that can be used. + pub fn available_connectors(&self) -> Vec { + self.connectors + .iter() + .filter_map(|c| c.state.as_option()) + .map(|s| s.1.ident().to_string()) + .collect::>() + } + + /// Returns the names of all currently available os plugins that can be used. + pub fn available_os(&self) -> Vec { + self.os_layers + .iter() + .filter_map(|c| c.state.as_option()) + .map(|s| s.1.ident().to_string()) + .collect::>() + } + + /// Returns the help string of the given Connector. + /// + /// This function returns an error in case the Connector was not found or does not implement the help feature. + pub fn connector_help(&self, name: &str) -> Result { + Self::help_internal(&self.connectors, name) + } + + /// Returns the help string of the given Os Plugin. + /// + /// This function returns an error in case the Os Plugin was not found or does not implement the help feature. + pub fn os_help(&self, name: &str) -> Result { + Self::help_internal(&self.os_layers, name) + } + + fn help_internal(libs: &[LibInstance], name: &str) -> Result { + let loader = libs + .iter() + .filter_map(|c| c.state.as_option().map(|s| s.1)) + .find(|s| s.ident() == name) + .ok_or_else(|| { + error!("unable to find plugin with name '{}'.", name,); + error!( + "possible available `{}` plugins are: {}", + T::plugin_type(), + Self::plugin_list_available(libs), + ); + error!( + "outdated/mismatched `{}` plugins where found at: {}", + T::plugin_type(), + Self::plugin_list_unavailable(libs), + ); + Error(ErrorOrigin::Inventory, ErrorKind::PluginNotFound) + })?; + + loader.help() + } + + /// Returns a list of all available targets of the connector. + /// + /// This function returns an error in case the connector does not implement this feature. 
+ pub fn connector_target_list(&self, name: &str) -> Result> { + let loader = self + .connectors + .iter() + .filter_map(|c| c.state.as_option().map(|s| s.1)) + .find(|s| s.ident() == name) + .ok_or_else(|| { + error!("unable to find plugin with name '{}'.", name,); + error!( + "possible available `{}` plugins are: {}", + LoadableConnector::plugin_type(), + Self::plugin_list_available(&self.connectors), + ); + error!( + "outdated/mismatched `{}` plugins where found at: {}", + LoadableConnector::plugin_type(), + Self::plugin_list_unavailable(&self.connectors), + ); + Error(ErrorOrigin::Inventory, ErrorKind::PluginNotFound) + })?; + + loader.target_list() + } + + /// Creates a new Connector / OS builder. + /// + /// # Examples + /// + /// Create a connector: + /// ```no_run + /// use memflow::plugins::Inventory; + /// + /// let inventory = Inventory::scan(); + /// let os = inventory + /// .builder() + /// .connector("qemu") + /// .build(); + /// ``` + /// + /// Create a Connector with arguments: + /// ```no_run + /// use memflow::plugins::{Inventory, Args}; + /// + /// let inventory = Inventory::scan(); + /// let os = inventory + /// .builder() + /// .connector("qemu") + /// .args(str::parse("vm-win10").unwrap()) + /// .build(); + /// ``` + /// + /// Create a Connector and OS with arguments: + /// ```no_run + /// use memflow::plugins::{Inventory, Args}; + /// + /// let inventory = Inventory::scan(); + /// let os = inventory + /// .builder() + /// .connector("qemu") + /// .args(str::parse("vm-win10").unwrap()) + /// .os("win10") + /// .build(); + /// ``` + /// + /// Create a OS without a connector and arguments: + /// ```no_run + /// use memflow::plugins::Inventory; + /// + /// let inventory = Inventory::scan(); + /// let os = inventory + /// .builder() + /// .os("native") + /// .build(); + /// ``` + pub fn builder(&self) -> BuilderEmpty { + BuilderEmpty { inventory: self } + } + + /// Tries to create a new instance for the library with the given name. 
+ /// The instance will be initialized with the args provided to this call. + /// + /// In case no library could be found this will throw an `Error::Library`. + /// + /// # Safety + /// + /// This function assumes all libraries were loaded with appropriate safety + /// checks in place. This function is safe, but can crash if previous checks + /// fail. + /// + /// # Examples + /// + /// Creating a connector instance: + /// ```no_run + /// use memflow::plugins::{Inventory, Args}; + /// + /// let inventory = Inventory::scan_path("./").unwrap(); + /// let connector = inventory + /// .create_connector("coredump", None, None) + /// .unwrap(); + /// ``` + /// + /// Defining a dynamically loaded connector: + /// ``` + /// use memflow::error::Result; + /// use memflow::types::size; + /// use memflow::dummy::DummyMemory; + /// use memflow::plugins::ConnectorArgs; + /// use memflow::derive::connector; + /// use memflow::mem::phys_mem::*; + /// + /// #[connector(name = "dummy_conn")] + /// pub fn create_connector(_args: &ConnectorArgs) -> Result { + /// Ok(DummyMemory::new(size::mb(16))) + /// } + /// ``` + pub fn create_connector( + &self, + name: &str, + input: ConnectorInputArg, + args: Option<&ConnectorArgs>, + ) -> Result> { + Self::create_internal(&self.connectors, name, input, args) + } + + /// Create OS instance + /// + /// This is the primary way of building a OS instance. 
+ /// + /// # Arguments + /// + /// * `name` - name of the target OS + /// * `input` - connector to be passed to the OS + /// * `args` - arguments to be passed to the OS + /// + /// # Examples + /// + /// Creating a OS instance with custom arguments + /// ``` + /// use memflow::plugins::{Inventory, ConnectorArgs}; + /// + /// # let inventory = Inventory::scan().with_workspace().unwrap(); + /// let args = str::parse(":4m").unwrap(); + /// let os = inventory.create_os("dummy", None, Some(&args)) + /// .unwrap(); + /// std::mem::drop(os); + /// ``` + pub fn create_os( + &self, + name: &str, + input: OsInputArg, + args: Option<&OsArgs>, + ) -> Result> { + Self::create_internal(&self.os_layers, name, input, args) + } + + fn create_internal( + libs: &[LibInstance], + name: &str, + input: T::InputArg, + args: Option<&T::ArgsType>, + ) -> Result { + let lib = libs + .iter() + .filter(|l| l.state.is_loaded()) + .find(|l| l.ident() == Some(name)) + .ok_or_else(|| { + error!("unable to find plugin with name '{}'.", name,); + error!( + "possible available `{}` plugins are: {}", + T::plugin_type(), + Self::plugin_list_available(libs), + ); + error!( + "outdated/mismatched `{}` plugins where found at: {}", + T::plugin_type(), + Self::plugin_list_unavailable(libs), + ); + Error(ErrorOrigin::Inventory, ErrorKind::PluginNotFound) + })?; + + if let LibInstanceState::Loaded { library, loader } = &lib.state { + info!( + "attempting to load `{}` type plugin `{}` from `{}`", + T::plugin_type(), + loader.ident(), + lib.path.to_string_lossy(), + ); + + loader.instantiate(library.clone(), input, args) + } else { + unreachable!() + } + } + + /// Sets the maximum logging level in all plugins and updates the + /// internal [`PluginLogger`] in each plugin instance. 
+ pub fn set_max_log_level(&self, level: LevelFilter) { + log::set_max_level(level); + self.update_max_log_level() + } + + fn update_max_log_level(&self) { + let level = log::max_level(); + + self.connectors + .iter() + .filter_map(|c| c.state.as_option()) + .map(|s| s.0) + .chain( + self.os_layers + .iter() + .filter_map(|o| o.state.as_option()) + .map(|s| s.0), + ) + .filter_map(|s| *s.as_ref()) + .filter_map(LibContext::try_get_logger) + .for_each(|l| l.on_level_change(level)); + } + + /// Returns a comma-seperated list of plugin identifiers of all available plugins + fn plugin_list_available(libs: &[LibInstance]) -> String { + libs.iter() + .filter_map(|c| c.state.as_option().map(|s| s.1.ident().to_string())) + .collect::>() + .join(", ") + } + + /// Returns a comma-seperated list of plugin paths of all un-available plugins that where found but could not be loaded. (e.g. because of ABI mismatch) + fn plugin_list_unavailable(libs: &[LibInstance]) -> String { + libs.iter() + .filter(|c| !c.state.is_loaded()) + .map(|c| c.path.to_string_lossy()) + .collect::>() + .join(", ") + } +} + +pub enum BuildStep<'a> { + Connector { + name: &'a str, + args: Option, + }, + Os { + name: &'a str, + args: Option, + }, +} + +impl<'a> BuildStep<'a> { + /// Parse input string and construct steps for building a connector. + /// + /// Name and arguments are separated by `:`, for example: + /// + /// `kvm:5`, or `qemu:win10:memmap=map`. + pub fn new_connector(input: &'a str) -> Result { + let (name, args) = input.split_once(':').unwrap_or((input, "")); + + Ok(Self::Connector { + name, + args: if args.is_empty() { + None + } else { + Some(str::parse(args)?) + }, + }) + } + + /// Parse input string and construct steps for building an OS. + /// + /// Name and arguments are separated by `:`, for example: + /// + /// `win32`, or `win32::dtb=0xdeadbeef`. 
+ pub fn new_os(input: &'a str) -> Result { + let (name, args) = input.split_once(':').unwrap_or((input, "")); + + Ok(Self::Os { + name, + args: if args.is_empty() { + None + } else { + Some(str::parse(args)?) + }, + }) + } + + /// Validate whether the next build step is compatible with the current one. + pub fn validate_next(&self, next: &Self) -> bool { + !matches!( + (self, next), + (BuildStep::Connector { .. }, BuildStep::Connector { .. }) + | (BuildStep::Os { .. }, BuildStep::Os { .. }) + ) + } +} + +fn builder_from_args<'a>( + connectors: impl Iterator, + os_layers: impl Iterator, +) -> Result>> { + let mut layers = connectors + .map(|(i, a)| BuildStep::new_connector(a).map(|a| (i, a))) + .chain(os_layers.map(|(i, a)| BuildStep::new_os(a).map(|a| (i, a)))) + .collect::>>()?; + + layers.sort_by(|(a, _), (b, _)| a.cmp(b)); + + if layers.windows(2).any(|w| !w[0].1.validate_next(&w[1].1)) { + return Err( + Error(ErrorOrigin::Other, ErrorKind::ArgValidation).log_error( + "invalid builder configuration, build steps cannot be used in the given order", + ), + ); + } + + Ok(layers.into_iter().map(|(_, s)| s).collect()) +} + +/// Precompiled connector chain. +/// +/// Use this with [`Inventory::builder`](Inventory::builder). +pub struct ConnectorChain<'a>(Vec>); + +impl<'a> ConnectorChain<'a> { + /// Build a new connector chain. + /// + /// Arguments are iterators of command line arguments with their position and value. The + /// position will be used to sort them and validate whether they are in correct order. + pub fn new( + connectors: impl Iterator, + os_layers: impl Iterator, + ) -> Result { + let steps = builder_from_args(connectors, os_layers)?; + steps.try_into() + } +} + +impl<'a> TryFrom>> for ConnectorChain<'a> { + type Error = Error; + + fn try_from(steps: Vec>) -> Result { + if !matches!(steps.last(), Some(BuildStep::Connector { .. 
})) { + return Err( + Error(ErrorOrigin::Other, ErrorKind::ArgValidation).log_error( + "invalid builder configuration, last build step has to be a connector", + ), + ); + } + + Ok(Self(steps)) + } +} + +/// Precompiled os chain. +/// +/// Use this with [`Inventory::builder`](Inventory::builder). +pub struct OsChain<'a>(Vec>); + +impl<'a> OsChain<'a> { + /// Build a new OS chain. + /// + /// Arguments are iterators of command line arguments with their position and value. The + /// position will be used to sort them and validate whether they are in correct order. + pub fn new( + connectors: impl Iterator, + os_layers: impl Iterator, + ) -> Result { + let steps = builder_from_args(connectors, os_layers)?; + steps.try_into() + } +} + +impl<'a> TryFrom>> for OsChain<'a> { + type Error = Error; + + fn try_from(steps: Vec>) -> Result { + if !matches!(steps.last(), Some(BuildStep::Os { .. })) { + return Err(Error(ErrorOrigin::Other, ErrorKind::ArgValidation) + .log_error("invalid builder configuration, last build step has to be a os")); + } + + Ok(Self(steps)) + } +} + +/// BuilderEmpty is the starting builder that allows to either call `connector`, or `os`. +pub struct BuilderEmpty<'a> { + inventory: &'a Inventory, +} + +impl<'a> BuilderEmpty<'a> { + /// Adds a Connector instance to the build chain + /// + /// # Arguments + /// + /// * `name` - name of the connector + pub fn connector(self, name: &'a str) -> OsBuilder<'a> { + OsBuilder { + inventory: self.inventory, + steps: vec![BuildStep::Connector { name, args: None }], + } + } + + /// Adds an OS instance to the build chain + /// + /// # Arguments + /// + /// * `name` - name of the target OS + pub fn os(self, name: &'a str) -> ConnectorBuilder<'a> { + ConnectorBuilder { + inventory: self.inventory, + steps: vec![BuildStep::Os { name, args: None }], + } + } + + /// Chains multiple pre-validated steps, resulting in an Os ready-to-build. + /// + /// # Arguments + /// + /// * `chain` - steps to initialize the builder with. 
+ pub fn os_chain(self, chain: OsChain<'a>) -> ConnectorBuilder<'a> { + ConnectorBuilder { + inventory: self.inventory, + steps: chain.0, + } + } + + /// Chains multiple pre-validated steps, resulting in a connector ready-to-build. + /// + /// # Arguments + /// + /// * `chain` - steps to initialize the builder with. + pub fn connector_chain(self, chain: ConnectorChain<'a>) -> OsBuilder<'a> { + OsBuilder { + inventory: self.inventory, + steps: chain.0, + } + } +} + +/// ConnectorBuilder creates a new connector instance with the previous os step as an input. +pub struct ConnectorBuilder<'a> { + inventory: &'a Inventory, + steps: Vec>, +} + +impl<'a> ConnectorBuilder<'a> { + /// Adds a Connector instance to the build chain + /// + /// # Arguments + /// + /// * `name` - name of the connector + pub fn connector(self, name: &'a str) -> OsBuilder<'a> { + let mut steps = self.steps; + steps.push(BuildStep::Connector { name, args: None }); + OsBuilder { + inventory: self.inventory, + steps, + } + } + + /// Appends arguments to the previously added OS. + /// + /// # Arguments + /// + /// * `os_args` - the arguments to be passed to the previously added OS + pub fn args(mut self, os_args: OsArgs) -> ConnectorBuilder<'a> { + if let Some(BuildStep::Os { name: _, args }) = self.steps.iter_mut().last() { + *args = Some(os_args); + } + self + } + + /// Builds the final chain of Connectors and OS and returns the last OS. + /// + /// Each created connector / os instance is fed into the next os / connector instance as an argument. + /// If any build step fails the function returns an error. 
+ pub fn build(self) -> Result> { + let mut connector: Option> = None; + let mut os: Option> = None; + for step in self.steps.iter() { + match step { + BuildStep::Connector { name, args } => { + connector = Some(self.inventory.create_connector(name, os, args.as_ref())?); + os = None; + } + BuildStep::Os { name, args } => { + os = Some(self.inventory.create_os(name, connector, args.as_ref())?); + connector = None; + } + }; + } + os.ok_or(Error(ErrorOrigin::Inventory, ErrorKind::Configuration)) + } +} + +/// OsBuilder creates a new os instance with the previous connector step as an input +pub struct OsBuilder<'a> { + inventory: &'a Inventory, + steps: Vec>, +} + +impl<'a> OsBuilder<'a> { + /// Adds an OS instance to the build chain + /// + /// # Arguments + /// + /// * `name` - name of the target OS + pub fn os(self, name: &'a str) -> ConnectorBuilder<'a> { + let mut steps = self.steps; + steps.push(BuildStep::Os { name, args: None }); + ConnectorBuilder { + inventory: self.inventory, + steps, + } + } + + /// Appends arguments to the previously added Connector. + /// + /// # Arguments + /// + /// * `conn_args` - the arguments to be passed to the previously added Connector + pub fn args(mut self, conn_args: ConnectorArgs) -> OsBuilder<'a> { + if let Some(BuildStep::Connector { name: _, args }) = self.steps.iter_mut().last() { + *args = Some(conn_args); + } + self + } + + /// Builds the final chain of Connectors and OS and returns the last Connector. + /// + /// Each created connector / os instance is fed into the next os / connector instance as an argument. + /// If any build step fails the function returns an error. 
+ pub fn build(self) -> Result> { + let mut connector: Option> = None; + let mut os: Option> = None; + for step in self.steps.iter() { + match step { + BuildStep::Connector { name, args } => { + connector = Some(self.inventory.create_connector(name, os, args.as_ref())?); + os = None; + } + BuildStep::Os { name, args } => { + os = Some(self.inventory.create_os(name, connector, args.as_ref())?); + connector = None; + } + }; + } + connector.ok_or(Error(ErrorOrigin::Inventory, ErrorKind::Configuration)) + } +} + +/// Reference counted library instance +/// +/// This stores the necessary reference counted library instance, in order to prevent the library +/// from unloading unexpectedly. This is the required safety guarantee. +#[repr(C)] +#[derive(Clone)] +pub struct LibInstance { + path: PathBuf, + state: LibInstanceState, +} + +impl LibInstance { + pub fn ident(&self) -> Option<&str> { + self.state.as_option().map(|s| s.1.ident()) + } +} + +#[repr(C)] +#[derive(Clone)] +pub enum LibInstanceState { + Loaded { + library: CArc, + loader: T, + }, + VersionMismatch, + InvalidAbi, +} + +impl LibInstanceState { + pub fn is_loaded(&self) -> bool { + matches!( + self, + LibInstanceState::Loaded { + library: _, + loader: _, + } + ) + } + + pub fn as_option(&self) -> Option<(&CArc, &T)> { + match self { + LibInstanceState::Loaded { library, loader } => Some((library, loader)), + _ => None, + } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/os.rs b/apex_dma/memflow_lib/memflow/src/plugins/os.rs new file mode 100644 index 0000000..d694362 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/os.rs @@ -0,0 +1,144 @@ +use crate::cglue::{result::from_int_result, *}; +use crate::error::*; +use crate::mem::memory_view::*; +use crate::mem::phys_mem::*; +use crate::mem::virt_translate::*; +use crate::os::keyboard::*; +use crate::os::process::*; +use crate::os::root::*; + +use super::LibArc; +use super::{ + args::split_str_args, Args, ConnectorInstanceArcBox, 
LibContext, Loadable, PluginDescriptor, + TargetInfo, +}; + +use cglue::trait_group::c_void; + +pub type OptionArchitectureIdent<'a> = Option<&'a crate::architecture::ArchitectureIdent>; + +cglue_trait_group!(OsInstance, { Os, Clone }, { PhysicalMemory, MemoryView, VirtualTranslate, OsKeyboard }); +pub type MuOsInstanceArcBox<'a> = std::mem::MaybeUninit>; + +cglue_trait_group!(ProcessInstance, { Process, MemoryView }, { VirtualTranslate }); +cglue_trait_group!(IntoProcessInstance, { Process, MemoryView, Clone }, { VirtualTranslate }); + +/// This creates a cglue plugin instance from the given [`Os`] object. +/// In the future this also might enable features (like caching) based on the input `args`. +pub fn create_instance( + conn: T, + lib: LibArc, + _args: &OsArgs, +) -> OsInstanceArcBox<'static> +where + (T, LibArc): Into>, +{ + group_obj!((conn, lib) as OsInstance) +} + +#[repr(C)] +#[derive(Default, Clone, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct OsArgs { + pub target: Option, + pub extra_args: Args, +} + +impl std::str::FromStr for OsArgs { + type Err = crate::error::Error; + + fn from_str(s: &str) -> Result { + let mut iter = split_str_args(s, ':'); + + let target = iter + .next() + .and_then(|s| if s.is_empty() { None } else { Some(s.into()) }); + + Ok(Self { + target, + extra_args: iter.next().unwrap_or("").parse()?, + }) + } +} + +impl OsArgs { + pub fn new(target: Option<&str>, extra_args: Args) -> Self { + Self { + target: target.map(<_>::into), + extra_args, + } + } +} + +pub type OsDescriptor = PluginDescriptor; + +pub struct LoadableOs { + descriptor: PluginDescriptor, +} + +impl Loadable for LoadableOs { + type Instance = OsInstanceArcBox<'static>; + type InputArg = Option>; + type CInputArg = COption>; + type ArgsType = OsArgs; + + fn export_prefix() -> &'static str { + "MEMFLOW_OS_" + } + + fn ident(&self) -> &str { + unsafe { self.descriptor.name.into_str() } + } + + fn plugin_type() -> 
&'static str { + "OS" + } + + fn new(descriptor: PluginDescriptor) -> Self { + Self { descriptor } + } + + /// Retrieves the help text for this plugin + fn help(&self) -> Result { + match self.descriptor.help_callback { + Some(help_callback) => { + let mut ret = vec![]; + (help_callback)((&mut ret).into()); + ret.first().map(|h| h.to_string()).ok_or_else(|| { + Error(ErrorOrigin::Connector, ErrorKind::NotSupported).log_error(format!( + "Os-Plugin `{}` did not return any help text.", + self.ident() + )) + }) + } + None => Err( + Error(ErrorOrigin::Connector, ErrorKind::NotSupported).log_error(format!( + "Os-Plugin `{}` does not support help text.", + self.ident() + )), + ), + } + } + + /// Retrieves the list of available targets for this connector. + fn target_list(&self) -> Result> { + Err(Error(ErrorOrigin::Connector, ErrorKind::NotSupported) + .log_error("Os-Plugin does not support target listing.")) + } + + /// Creates a new OS instance from this library. + /// + /// The OS is initialized with the arguments provided to this function. 
+ fn instantiate( + &self, + library: CArc, + input: Self::InputArg, + args: Option<&OsArgs>, + ) -> Result { + let mut out = MuOsInstanceArcBox::uninit(); + let logger = library.as_ref().map(|lib| unsafe { lib.get_logger() }); + let res = + (self.descriptor.create)(args, input.into(), library.into_opaque(), logger, &mut out); + unsafe { from_int_result(res, out) } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/plugins/util.rs b/apex_dma/memflow_lib/memflow/src/plugins/util.rs new file mode 100644 index 0000000..1d91017 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/plugins/util.rs @@ -0,0 +1,159 @@ +use crate::cglue::result::into_int_out_result; +use crate::error::{Error, ErrorKind, ErrorOrigin}; + +use super::{LibArc, PluginLogger}; + +use std::mem::MaybeUninit; +use std::path::Path; + +#[cfg(not(any(target_os = "windows", target_os = "macos")))] +pub fn find_export_by_prefix( + path: impl AsRef, + prefix: &str, +) -> crate::error::Result> { + use goblin::elf::Elf; + + let buffer = std::fs::read(path.as_ref()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::UnableToReadFile).log_trace(err))?; + let elf = Elf::parse(buffer.as_slice()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::InvalidExeFile).log_trace(err))?; + Ok(elf + .syms + .iter() + .filter_map(|s| { + if let Some(name) = elf.strtab.get_at(s.st_name) { + match name.starts_with(prefix) { + true => Some(name.to_owned()), + false => None, + } + } else { + None + } + }) + .collect::>()) +} + +#[cfg(target_os = "windows")] +pub fn find_export_by_prefix( + path: impl AsRef, + prefix: &str, +) -> crate::error::Result> { + use goblin::pe::PE; + + let buffer = std::fs::read(path.as_ref()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::UnableToReadFile).log_trace(err))?; + let pe = PE::parse(buffer.as_slice()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::InvalidExeFile).log_trace(err))?; + Ok(pe + .exports + .iter() + .filter_map(|s| s.name) + 
.filter_map(|name| { + if name.starts_with(prefix) { + Some(name.to_owned()) + } else { + None + } + }) + .collect::>()) +} + +#[cfg(target_os = "macos")] +pub fn find_export_by_prefix( + path: impl AsRef, + prefix: &str, +) -> crate::error::Result> { + use goblin::mach::{Mach, SingleArch}; + + let buffer = std::fs::read(path.as_ref()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::UnableToReadFile).log_trace(err))?; + let mach = Mach::parse(buffer.as_slice()) + .map_err(|err| Error(ErrorOrigin::Inventory, ErrorKind::InvalidExeFile).log_trace(err))?; + let macho = match mach { + Mach::Binary(mach) => mach, + Mach::Fat(mach) => (0..mach.narches) + .filter_map(|i| mach.get(i).ok()) + .filter_map(|a| match a { + SingleArch::MachO(mach) => Some(mach), + SingleArch::Archive(_) => None, + }) + .next() + .ok_or_else(|| { + Error(ErrorOrigin::Inventory, ErrorKind::InvalidExeFile) + .log_trace("failed to find valid MachO header!") + })?, + }; + + // macho symbols are prefixed with `_` in the object file. + let macho_prefix = "_".to_owned() + prefix; + Ok(macho + .symbols + .ok_or_else(|| { + Error(ErrorOrigin::Inventory, ErrorKind::InvalidExeFile) + .log_trace("failed to parse MachO symbols!") + })? + .iter() + .filter_map(|s| s.ok()) + .filter_map(|(name, _)| { + // symbols should only contain ascii characters + if name.starts_with(&macho_prefix) { + Some(name[1..].to_owned()) + } else { + None + } + }) + .collect::>()) +} + +/// Wrapper for instantiating object. 
+/// +/// This function will initialize the [`PluginLogger`], +/// parse args into `Args`, and call the create_fn +/// +/// This function is used by the connector proc macro +pub fn wrap( + args: Option<&A>, + lib: LibArc, + logger: Option<&'static PluginLogger>, + out: &mut MaybeUninit, + create_fn: impl FnOnce(&A, LibArc) -> Result, +) -> i32 { + if let Some(logger) = logger { + logger.init().ok(); + } + + let default: A = Default::default(); + let args = args.unwrap_or(&default); + + into_int_out_result(create_fn(args, lib), out) +} + +/// Wrapper for instantiating object with all needed parameters +/// +/// This function will initialize the [`PluginLogger`], +/// parse args into `Args` and call the create_fn with `input` forwarded. +/// +/// This function is used by the connector proc macro +pub fn wrap_with_input( + args: Option<&A>, + input: I, + lib: LibArc, + logger: Option<&'static PluginLogger>, + out: &mut MaybeUninit, + create_fn: impl FnOnce(&A, I, LibArc) -> Result, +) -> i32 { + if let Some(logger) = logger { + logger.init().ok(); + } + + let default: A = Default::default(); + let args = args.unwrap_or(&default); + + into_int_out_result( + create_fn(args, input, lib).map_err(|e| { + ::log::error!("{}", e); + e + }), + out, + ) +} diff --git a/apex_dma/memflow_lib/memflow/src/process/mod.rs b/apex_dma/memflow_lib/memflow/src/process/mod.rs deleted file mode 100644 index 0f835d2..0000000 --- a/apex_dma/memflow_lib/memflow/src/process/mod.rs +++ /dev/null @@ -1,81 +0,0 @@ -/*! -Traits for OS independent process abstractions. -*/ - -use std::prelude::v1::*; - -use crate::architecture::ArchitectureObj; -use crate::types::Address; - -/// Trait describing a operating system -pub trait OperatingSystem {} - -/// Type alias for a PID. -pub type PID = u32; - -/// Trait describing OS independent process information. -pub trait OsProcessInfo { - /// Returns the base address of this process. 
- /// - /// # Remarks - /// - /// On Windows this will return the address of the [`_EPROCESS`](https://www.nirsoft.net/kernel_struct/vista/EPROCESS.html) structure. - fn address(&self) -> Address; - - /// Returns the pid of this process. - fn pid(&self) -> PID; - - /// Returns the name of the process. - /// - /// # Remarks - /// - /// On Windows this will be clamped to 16 characters. - fn name(&self) -> String; - - /// Returns the architecture of the target system. - fn sys_arch(&self) -> ArchitectureObj; - - /// Returns the architecture of the process. - /// - /// # Remarks - /// - /// Specifically on 64-bit systems this could be different - /// to the `sys_arch` in case the process is an emulated 32-bit process. - /// - /// On windows this technique is called [`WOW64`](https://docs.microsoft.com/en-us/windows/win32/winprog64/wow64-implementation-details). - fn proc_arch(&self) -> ArchitectureObj; -} - -// TODO: Range impl for base to size? -/// Trait describing OS independent module information. -pub trait OsProcessModuleInfo { - /// Returns the address of the module header. - /// - /// # Remarks - /// - /// On Windows this will return the address where the [`PEB`](https://docs.microsoft.com/en-us/windows/win32/api/winternl/ns-winternl-peb) entry is stored. - fn address(&self) -> Address; - - /// Returns the base address of the parent process. - /// - /// # Remarks - /// - /// This method is analog to the `OsProcessInfo::address` function. - fn parent_process(&self) -> Address; - - /// Returns the actual base address of this module. - /// - /// # Remarks - /// - /// The base address is contained in the virtual address range of the process - /// this module belongs to. - fn base(&self) -> Address; - - /// Returns the size of the module. - fn size(&self) -> usize; - - /// Returns the full name of the module. 
- fn name(&self) -> String; -} - -// TODO: Exports / Sections / etc diff --git a/apex_dma/memflow_lib/memflow/src/types/address.rs b/apex_dma/memflow_lib/memflow/src/types/address.rs index 183b565..3f9d6fc 100644 --- a/apex_dma/memflow_lib/memflow/src/types/address.rs +++ b/apex_dma/memflow_lib/memflow/src/types/address.rs @@ -2,49 +2,197 @@ Abstraction over a address on the target system. */ +use super::{PhysicalAddress, Pointer}; +use crate::types::ByteSwap; + use core::convert::TryInto; use std::default::Default; use std::fmt; +use std::hash; use std::ops; -/// This type represents a address on the target system. -/// It internally holds a `u64` value but can also be used -/// when working in 32-bit environments. +/// The largest target memory type +/// The following core rule is defined for these memory types: /// -/// This type will not handle overflow for 32-bit or 64-bit addresses / lengths. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] -#[repr(transparent)] -pub struct Address(u64); - -/// Constructs an `Address` from a `i32` value. -impl From for Address { - fn from(item: i32) -> Self { - Self { 0: item as u64 } - } +/// `PAGE_SIZE < usize <= umem` +/// +/// Where `PAGE_SIZE` is any lowest granularity page size, `usize` is the standard size type, and +/// `umem` is memflow's memory size type. 
+/// +/// This means that `usize` can always be safely cast to `umem`, while anything to do with page +/// sizes can be cast to `umem` safely, +/// +#[cfg(feature = "64_bit_mem")] +#[allow(non_camel_case_types)] +pub type umem = u64; +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +#[allow(non_camel_case_types)] +pub type umem = u128; +#[cfg(all(not(feature = "64_bit_mem"), not(feature = "128_bit_mem")))] +#[allow(non_camel_case_types)] +pub type umem = usize; +#[cfg(feature = "64_bit_mem")] +#[allow(non_camel_case_types)] +pub type imem = i64; +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +#[allow(non_camel_case_types)] +pub type imem = i128; +#[cfg(all(not(feature = "64_bit_mem"), not(feature = "128_bit_mem")))] +#[allow(non_camel_case_types)] +pub type imem = isize; + +pub const UMEM_BITS: u8 = core::mem::size_of::() as u8 * 8; + +// Enforce the `umem` >= `usize` condition. Whenever a real 128-bit architecture is here, `umem` +// should be expanded to 128 bits. +const _: [u8; (core::mem::size_of::() <= core::mem::size_of::()) as usize] = [0; 1]; + +pub const fn clamp_to_usize(val: umem) -> usize { + let max = core::usize::MAX as umem; + + let ret = if max < val { max } else { val }; + + ret as usize } -/// Constructs an `Address` from a `u32` value. -impl From for Address { - fn from(item: u32) -> Self { - Self { 0: u64::from(item) } - } +pub const fn clamp_to_isize(val: imem) -> isize { + let max = core::isize::MAX as imem; + let min = core::isize::MIN as imem; + + let ret = if max < val { + max + } else if min > val { + min + } else { + val + }; + + ret as isize } -/// Constructs an `Address` from a `u64` value. -impl From for Address { - fn from(item: u64) -> Self { - Self { 0: item } +/// `PrimitiveAddress` describes the address of a target system. +/// The current implementations include `u32`, `u64` and later eventually `u128`. +/// This trait can be used to abstract objects over the target pointer width. 
+pub trait PrimitiveAddress: + Copy + + Eq + + PartialEq + + Ord + + PartialOrd + + hash::Hash + + fmt::LowerHex + + fmt::UpperHex + + ByteSwap + + ops::Add + + ops::Sub +{ + fn null() -> Self; + fn invalid() -> Self; + + fn min() -> Self; + fn max() -> Self; + + fn from_umem(frm: umem) -> Self; + fn from_imem(frm: imem) -> Self; + + fn wrapping_add(self, rhs: Self) -> Self; + fn wrapping_sub(self, rhs: Self) -> Self; + fn saturating_sub(self, rhs: Self) -> Self; + fn overflowing_shr(self, rhs: u32) -> (Self, bool); + + fn to_umem(self) -> umem; + fn to_imem(self) -> imem; + + #[inline] + fn is_null(self) -> bool { + self.eq(&Self::null()) } } -/// Constructs an `Address` from a `usize` value. -impl From for Address { - fn from(item: usize) -> Self { - Self { 0: item as u64 } - } +#[macro_export] +macro_rules! impl_primitive_address { + ($type_name:ident) => { + impl PrimitiveAddress for $type_name { + #[inline] + fn null() -> Self { + 0 as $type_name + } + + #[inline] + fn invalid() -> Self { + !Self::null() + } + + #[inline] + fn min() -> Self { + Self::MIN + } + + #[inline] + fn max() -> Self { + Self::MAX + } + + #[inline] + fn from_umem(frm: umem) -> Self { + frm as Self + } + + #[inline] + fn from_imem(frm: imem) -> Self { + frm as Self + } + + #[inline] + fn wrapping_add(self, rhs: Self) -> Self { + self.wrapping_add(rhs) + } + + #[inline] + fn wrapping_sub(self, rhs: Self) -> Self { + self.wrapping_sub(rhs) + } + + #[inline] + fn saturating_sub(self, rhs: Self) -> Self { + self.saturating_sub(rhs) + } + + #[inline] + fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + self.overflowing_shr(rhs) + } + + #[inline] + fn to_umem(self) -> umem { + self as umem + } + + #[inline] + fn to_imem(self) -> imem { + self as imem + } + } + }; } +impl_primitive_address!(u16); +impl_primitive_address!(u32); +impl_primitive_address!(u64); +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +impl_primitive_address!(u128); + +/// This type represents a 
address on the target system. +/// It internally holds a `umem` value but can also be used +/// when working in 32-bit environments. +/// +/// This type will not handle overflow for 32-bit or 64-bit addresses / lengths. +#[repr(transparent)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] +pub struct Address(umem); + impl Address { /// A address with the value of zero. /// @@ -55,7 +203,7 @@ impl Address { /// /// println!("address: {}", Address::NULL); /// ``` - pub const NULL: Address = Address { 0: 0 }; + pub const NULL: Address = Address(0); /// A address with an invalid value. /// @@ -66,7 +214,7 @@ impl Address { /// /// println!("address: {}", Address::INVALID); /// ``` - pub const INVALID: Address = Address { 0: !0 }; + pub const INVALID: Address = Address(!0); /// Returns an address with a value of zero. /// @@ -90,12 +238,31 @@ impl Address { /// ``` /// use memflow::types::Address; /// - /// println!("mask: {}", Address::bit_mask(0..11)); + /// println!("mask: {}", Address::bit_mask(0..=11)); + /// ``` + pub fn bit_mask>(bits: ops::RangeInclusive) -> Address + where + T: TryInto, + T: Copy, + { + Address( + (!0 >> ((UMEM_BITS - 1) - (*bits.end()).try_into().ok().unwrap())) + & !((1 << (*bits.start()).try_into().ok().unwrap()) - 1), + ) + } + + /// Creates a a bit mask (const version with u8 range). + /// This function accepts an (half-open) range excluding the end bit from the mask. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::types::Address; + /// + /// println!("mask: {}", Address::bit_mask_u8(0..=11)); /// ``` - pub fn bit_mask>(bits: ops::Range) -> Address { - ((0xffff_ffff_ffff_ffff >> (63 - bits.end.try_into().ok().unwrap())) - & !(((1 as u64) << bits.start.try_into().ok().unwrap()) - 1)) - .into() + pub const fn bit_mask_u8(bits: ops::RangeInclusive) -> Address { + Address((!0 >> (UMEM_BITS - 1 - *bits.end())) & !((1 << *bits.start()) - 1)) } /// Checks wether the address is zero or not. @@ -161,70 +328,40 @@ impl Address { self.0 != !0 } - /// Converts the address into a `u32` value. - /// - /// # Examples - /// - /// ``` - /// use memflow::types::Address; - /// - /// let addr = Address::from(0x1000u64); - /// let addr_u32: u32 = addr.as_u32(); - /// assert_eq!(addr_u32, 0x1000); - /// ``` - #[inline] - pub const fn as_u32(self) -> u32 { - self.0 as u32 - } - /// Converts the address into a `u64` value. /// /// # Examples /// /// ``` - /// use memflow::types::Address; + /// use memflow::types::{Address, umem}; /// /// let addr = Address::from(0x1000u64); - /// let addr_u64: u64 = addr.as_u64(); - /// assert_eq!(addr_u64, 0x1000); + /// let addr_umem: umem = addr.to_umem(); + /// assert_eq!(addr_umem, 0x1000); /// ``` #[inline] - pub const fn as_u64(self) -> u64 { + pub const fn to_umem(self) -> umem { self.0 } - /// Converts the address into a `usize` value. - /// - /// # Examples - /// - /// ``` - /// use memflow::types::Address; - /// - /// let addr = Address::from(0x1000u64); - /// let addr_usize: usize = addr.as_usize(); - /// assert_eq!(addr_usize, 0x1000); - /// ``` - #[inline] - pub const fn as_usize(self) -> usize { - self.0 as usize - } - /// Aligns the containing address to the given page size. /// It returns the base address of the containing page. 
/// /// # Examples /// /// ``` - /// use memflow::types::{Address, size}; + /// use memflow::types::{Address, mem}; /// /// let addr = Address::from(0x1234); - /// let aligned = addr.as_page_aligned(size::kb(4)); + /// let aligned = addr.as_mem_aligned(mem::kb(4)); /// assert_eq!(aligned, Address::from(0x1000)); /// ``` - pub const fn as_page_aligned(self, page_size: usize) -> Address { - Address { - 0: self.0 - self.0 % (page_size as u64), - } + pub const fn as_mem_aligned(self, mem_size: umem) -> Self { + Self(self.0 - self.0 % mem_size) + } + + pub const fn as_page_aligned(self, page_size: usize) -> Self { + self.as_mem_aligned(page_size as umem) } /// Returns true or false wether the bit at the specified index is either 0 or 1. @@ -240,7 +377,7 @@ impl Address { /// assert_eq!(bit, true); /// ``` pub const fn bit_at(self, idx: u8) -> bool { - (self.0 & ((1 as u64) << idx)) != 0 + (self.0 & (1 << idx)) != 0 } /// Extracts the given range of bits by applying a corresponding bitmask. @@ -252,10 +389,25 @@ impl Address { /// use memflow::types::Address; /// /// let addr = Address::from(123456789); - /// println!("bits[0..2] = {}", addr.extract_bits(0..2)); + /// println!("bits[0..2] = {}", addr.extract_bits(0..=2)); /// ``` - pub fn extract_bits>(self, bits: ops::Range) -> Address { - (self.0 & Address::bit_mask(bits).as_u64()).into() + pub fn extract_bits>(self, bits: ops::RangeInclusive) -> Address + where + T: Copy, + { + (self.0 & Address::bit_mask(bits).to_umem()).into() + } + + /// Wrapping (modular) addition. Computes `self + rhs`, + /// wrapping around at the boundary of the type. + pub const fn wrapping_add(self, other: Self) -> Self { + Self(self.0.wrapping_add(other.0)) + } + + /// Wrapping (modular) subtraction. Computes `self - rhs`, + /// wrapping around at the boundary of the type. 
+ pub const fn wrapping_sub(self, other: Self) -> Self { + Self(self.0.wrapping_sub(other.0)) } } @@ -274,107 +426,225 @@ impl Default for Address { } } -/// Adds a `usize` to a `Address` which results in a `Address`. -/// # Examples -/// ``` -/// use memflow::types::Address; -/// assert_eq!(Address::from(10) + 5usize, Address::from(15)); -/// ``` -impl ops::Add for Address { - type Output = Self; - - fn add(self, other: usize) -> Self { - Self { - 0: self.0 + (other as u64), - } +/// Implements byteswapping for the address +impl ByteSwap for Address { + fn byte_swap(&mut self) { + self.0.byte_swap(); } } -/// Adds any compatible type reference to Address -impl<'a, T: Into + Copy> ops::Add<&'a T> for Address { - type Output = Self; +#[macro_export] +macro_rules! impl_address_from { + ($type_name:ident) => { + impl From<$type_name> for Address { + fn from(item: $type_name) -> Self { + Self { 0: item as umem } + } + } - fn add(self, other: &'a T) -> Self { - Self { - 0: self.0 + (*other).into(), + impl From> for Address { + #[inline(always)] + fn from(ptr: Pointer<$type_name, T>) -> Self { + Self { + 0: ptr.inner as umem, + } + } } - } + }; } -/// Adds a `usize` to a `Address`. -/// -/// # Examples -/// -/// ``` -/// use memflow::types::Address; -/// -/// let mut addr = Address::from(10); -/// addr += 5; -/// assert_eq!(addr, Address::from(15)); -/// ``` -impl ops::AddAssign for Address { - fn add_assign(&mut self, other: usize) { - *self = Self { - 0: self.0 + (other as u64), - } +// u16, u32, u64 is handled by the PrimitiveAddress implementation below. +impl_address_from!(i8); +impl_address_from!(u8); +impl_address_from!(i16); +//impl_address_from!(u16); +impl_address_from!(i32); +//impl_address_from!(u32); +impl_address_from!(i64); +//impl_address_from!(u64); +impl_address_from!(usize); +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +impl_address_from!(i128); + +/// Converts any `PrimitiveAddress` into an Address. 
+impl From for Address { + #[inline(always)] + fn from(val: U) -> Self { + Self(val.to_umem()) } } -/// Subtracts a `Address` from a `Address` resulting in a `usize`. -/// -/// # Examples -/// -/// ``` -/// use memflow::types::Address; -/// -/// assert_eq!(Address::from(10) - 5, Address::from(5)); -/// ``` -impl ops::Sub for Address { - type Output = usize; +/// Converts a `PhysicalAddress` into a `Address`. +impl From for Address { + fn from(address: PhysicalAddress) -> Self { + address.address + } +} - fn sub(self, other: Self) -> usize { - (self.0 - other.0) as usize +/// Converts any `Pointer` into an Address. +impl From> for Address { + #[inline(always)] + fn from(ptr: Pointer) -> Self { + Self(ptr.inner.to_umem()) } } -/// Subtracts a `usize` from a `Address` resulting in a `Address`. -impl ops::Sub for Address { - type Output = Address; +#[macro_export] +macro_rules! impl_address_arithmetic_unsigned { + ($type_name:ident) => { + impl ops::Add<$type_name> for Address { + type Output = Self; + + fn add(self, other: $type_name) -> Self { + Self { + 0: self.0 + (other as umem), + } + } + } + + impl ops::AddAssign<$type_name> for Address { + fn add_assign(&mut self, other: $type_name) { + *self = Self { + 0: self.0 + (other as umem), + } + } + } + + impl ops::Sub<$type_name> for Address { + type Output = Address; + + fn sub(self, other: $type_name) -> Address { + Self { + 0: self.0 - (other as umem), + } + } + } + + impl ops::SubAssign<$type_name> for Address { + fn sub_assign(&mut self, other: $type_name) { + *self = Self { + 0: self.0 - (other as umem), + } + } + } + }; +} - fn sub(self, other: usize) -> Address { - Self { - 0: self.0 - (other as u64), +#[macro_export] +macro_rules! 
impl_address_arithmetic_signed { + ($type_name:ident) => { + impl ops::Add<$type_name> for Address { + type Output = Self; + + fn add(self, other: $type_name) -> Self { + if other >= 0 { + Self { + 0: self.0 + (other as umem), + } + } else { + Self { + 0: self.0 - (-other as umem), + } + } + } } + + impl ops::AddAssign<$type_name> for Address { + fn add_assign(&mut self, other: $type_name) { + if other >= 0 { + *self = Self { + 0: self.0 + (other as umem), + } + } else { + *self = Self { + 0: self.0 - (-other as umem), + } + } + } + } + + impl ops::Sub<$type_name> for Address { + type Output = Address; + + fn sub(self, other: $type_name) -> Address { + if other >= 0 { + Self { + 0: self.0 - (other as umem), + } + } else { + Self { + 0: self.0 + (-other as umem), + } + } + } + } + + impl ops::SubAssign<$type_name> for Address { + fn sub_assign(&mut self, other: $type_name) { + if other >= 0 { + *self = Self { + 0: self.0 - (other as umem), + } + } else { + *self = Self { + 0: self.0 + (-other as umem), + } + } + } + } + }; +} + +impl_address_arithmetic_signed!(i8); +impl_address_arithmetic_signed!(i16); +impl_address_arithmetic_signed!(i32); +impl_address_arithmetic_signed!(i64); +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +impl_address_arithmetic_signed!(i128); +impl_address_arithmetic_signed!(isize); +impl_address_arithmetic_unsigned!(u8); +impl_address_arithmetic_unsigned!(u16); +impl_address_arithmetic_unsigned!(u32); +impl_address_arithmetic_unsigned!(u64); +#[cfg(all(feature = "128_bit_mem", not(feature = "64_bit_mem")))] +impl_address_arithmetic_unsigned!(u128); +impl_address_arithmetic_unsigned!(usize); + +/// Adds any compatible type reference to Address +impl<'a, T: Into + Copy> ops::Add<&'a T> for Address { + type Output = Self; + + fn add(self, other: &'a T) -> Self { + Self(self.0 + (*other).into()) } } /// Subtracts any compatible type reference to Address -impl<'a, T: Into + Copy> ops::Sub<&'a T> for Address { +impl<'a, T: Into 
+ Copy> ops::Sub<&'a T> for Address { type Output = Self; fn sub(self, other: &'a T) -> Self { - Self { - 0: self.0 - (*other).into(), - } + Self(self.0 - (*other).into()) } } -/// Subtracts a `usize` from a `Address`. +/// Subtracts a `Address` from a `Address` resulting in a `umem`. /// /// # Examples /// /// ``` /// use memflow::types::Address; /// -/// let mut addr = Address::from(10); -/// addr -= 5; -/// assert_eq!(addr, Address::from(5)); -/// +/// assert_eq!(Address::from(10) - 5, Address::from(5)); /// ``` -impl ops::SubAssign for Address { - fn sub_assign(&mut self, other: usize) { - *self = Self { - 0: self.0 - (other as u64), +impl ops::Sub for Address { + type Output = imem; + + fn sub(self, other: Self) -> imem { + if self.0 > other.0 { + (self.0 - other.0) as imem + } else { + -((other.0 - self.0) as imem) } } } @@ -409,61 +679,74 @@ mod tests { #[test] fn test_null_valid() { - assert_eq!(Address::null().is_null(), true); - assert_eq!(Address::invalid().is_valid(), false); + assert!(Address::null().is_null()); + assert!(!Address::invalid().is_valid()); } #[test] fn test_from() { - assert_eq!(Address::from(1337).as_u64(), 1337); - assert_eq!(Address::from(4321).as_usize(), 4321); + assert_eq!(Address::from(1337_u32).to_umem(), 1337); + assert_eq!(Address::from(4321_u64).to_umem(), 4321); } #[test] fn test_alignment() { assert_eq!( - Address::from(0x1234).as_page_aligned(size::kb(4)), - Address::from(0x1000) + Address::from(0x1234_u64).as_page_aligned(size::kb(4)), + Address::from(0x1000_u64) ); assert_eq!( - Address::from(0xFFF1_2345u64).as_page_aligned(0x10000), - Address::from(0xFFF1_0000u64) + Address::from(0xFFF1_2345_u64).as_page_aligned(0x10000), + Address::from(0xFFF1_0000_u64) ); } #[test] fn test_bits() { - assert_eq!(Address::from(1).bit_at(0), true); - assert_eq!(Address::from(1).bit_at(1), false); - assert_eq!(Address::from(1).bit_at(2), false); - assert_eq!(Address::from(1).bit_at(3), false); + assert!(Address::from(1_u64).bit_at(0)); + 
assert!(!Address::from(1_u64).bit_at(1)); + assert!(!Address::from(1_u64).bit_at(2)); + assert!(!Address::from(1_u64).bit_at(3)); - assert_eq!(Address::from(2).bit_at(0), false); - assert_eq!(Address::from(2).bit_at(1), true); - assert_eq!(Address::from(2).bit_at(2), false); - assert_eq!(Address::from(2).bit_at(3), false); + assert!(!Address::from(2_u64).bit_at(0)); + assert!(Address::from(2_u64).bit_at(1)); + assert!(!Address::from(2_u64).bit_at(2)); + assert!(!Address::from(2_u64).bit_at(3)); - assert_eq!(Address::from(13).bit_at(0), true); - assert_eq!(Address::from(13).bit_at(1), false); - assert_eq!(Address::from(13).bit_at(2), true); - assert_eq!(Address::from(13).bit_at(3), true); + assert!(Address::from(13_u64).bit_at(0)); + assert!(!Address::from(13_u64).bit_at(1)); + assert!(Address::from(13_u64).bit_at(2)); + assert!(Address::from(13_u64).bit_at(3)); } #[test] fn test_bit_mask() { - assert_eq!(Address::bit_mask(0..11).as_u64(), 0xfff); - assert_eq!(Address::bit_mask(12..20).as_u64(), 0x001f_f000); - assert_eq!(Address::bit_mask(21..29).as_u64(), 0x3fe0_0000); - assert_eq!(Address::bit_mask(30..38).as_u64(), 0x007f_c000_0000); - assert_eq!(Address::bit_mask(39..47).as_u64(), 0xff80_0000_0000); - assert_eq!(Address::bit_mask(12..51).as_u64(), 0x000f_ffff_ffff_f000); + assert_eq!(Address::bit_mask(0..=11).to_umem(), 0xfff); + assert_eq!(Address::bit_mask(12..=20).to_umem(), 0x001f_f000); + assert_eq!(Address::bit_mask(21..=29).to_umem(), 0x3fe0_0000); + assert_eq!(Address::bit_mask(30..=38).to_umem(), 0x007f_c000_0000); + assert_eq!(Address::bit_mask(39..=47).to_umem(), 0xff80_0000_0000); + assert_eq!(Address::bit_mask(12..=51).to_umem(), 0x000f_ffff_ffff_f000); + } + + #[test] + fn test_bit_mask_u8() { + assert_eq!(Address::bit_mask_u8(0..=11).to_umem(), 0xfff); + assert_eq!(Address::bit_mask_u8(12..=20).to_umem(), 0x001f_f000); + assert_eq!(Address::bit_mask_u8(21..=29).to_umem(), 0x3fe0_0000); + assert_eq!(Address::bit_mask_u8(30..=38).to_umem(), 
0x007f_c000_0000); + assert_eq!(Address::bit_mask_u8(39..=47).to_umem(), 0xff80_0000_0000); + assert_eq!( + Address::bit_mask_u8(12..=51).to_umem(), + 0x000f_ffff_ffff_f000 + ); } #[test] fn test_ops() { - assert_eq!(Address::from(10) + 5usize, Address::from(15)); + assert_eq!(Address::from(10_u64) + 5usize, Address::from(15_u64)); - assert_eq!(Address::from(10) - Address::from(5), 5usize); - assert_eq!(Address::from(100) - 5usize, Address::from(95)); + assert_eq!(Address::from(10_u64) - Address::from(5_u64), 5); + assert_eq!(Address::from(100_u64) - 5usize, Address::from(95_u64)); } } diff --git a/apex_dma/memflow_lib/memflow/src/types/byte_swap.rs b/apex_dma/memflow_lib/memflow/src/types/byte_swap.rs index 531004c..5be3f96 100644 --- a/apex_dma/memflow_lib/memflow/src/types/byte_swap.rs +++ b/apex_dma/memflow_lib/memflow/src/types/byte_swap.rs @@ -15,7 +15,7 @@ use core::marker::PhantomData; /// /// ``` /// use memflow::types::ByteSwap; -/// use memflow_derive::*; +/// use memflow::derive::*; /// /// #[repr(C)] /// #[derive(ByteSwap)] diff --git a/apex_dma/memflow_lib/memflow/src/mem/cache/count_validator.rs b/apex_dma/memflow_lib/memflow/src/types/cache/count_validator.rs similarity index 77% rename from apex_dma/memflow_lib/memflow/src/mem/cache/count_validator.rs rename to apex_dma/memflow_lib/memflow/src/types/cache/count_validator.rs index 73cbdff..95303bb 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/count_validator.rs +++ b/apex_dma/memflow_lib/memflow/src/types/cache/count_validator.rs @@ -1,14 +1,13 @@ -/*! -Validators are used when working with caches and determine for how long -a specific cache entry stays valid. +//! Validators are used when working with caches and determine for how long +//! a specific cache entry stays valid. +//! +//! This validator limits the cache time based on an actual time instant. +//! Internally it uses the [coarsetime](https://docs.rs/coarsetime/0.1.14/coarsetime/) crate as a less +//! 
computation intensive alternative for [std::time](https://doc.rust-lang.org/std/time/index.html). +//! Therefor the Duration has to be converted (e.g. via the .into() trait) when constructing this validator. +//! +//! The default implementation will set the cache time to 1 second. -This validator limits the cache time based on an actual time instant. -Internally it uses the [coarsetime](https://docs.rs/coarsetime/0.1.14/coarsetime/) crate as a less -computation intensive alternative for [std::time](https://doc.rust-lang.org/std/time/index.html). -Therefor the Duration has to be converted (e.g. via the .into() trait) when constructing this validator. - -The default implementation will set the cache time to 1 second. -*/ use std::prelude::v1::*; use super::CacheValidator; @@ -41,7 +40,7 @@ impl CountCacheValidator { /// /// # Examples: /// ``` - /// use memflow::mem::{CacheValidator, CountCacheValidator}; + /// use memflow::types::cache::{CacheValidator, CountCacheValidator}; /// /// let mut validator = CountCacheValidator::new(100); /// diff --git a/apex_dma/memflow_lib/memflow/src/mem/cache/mod.rs b/apex_dma/memflow_lib/memflow/src/types/cache/mod.rs similarity index 51% rename from apex_dma/memflow_lib/memflow/src/mem/cache/mod.rs rename to apex_dma/memflow_lib/memflow/src/types/cache/mod.rs index 364da73..438bdc6 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/types/cache/mod.rs @@ -1,19 +1,8 @@ -pub mod cached_memory_access; -pub mod cached_vat; - #[cfg(feature = "std")] pub mod timed_validator; pub mod count_validator; -mod page_cache; -mod tlb_cache; - -#[doc(hidden)] -pub use cached_memory_access::*; -#[doc(hidden)] -pub use cached_vat::*; - #[cfg(feature = "std")] #[doc(hidden)] pub use timed_validator::*; @@ -26,17 +15,33 @@ pub type DefaultCacheValidator = TimedCacheValidator; #[cfg(not(feature = "std"))] pub type DefaultCacheValidator = CountCacheValidator; -use crate::types::PageType; - /// Validators 
are used when working with caches and determine for how long /// a specific cache entry stays valid. pub trait CacheValidator where Self: Send, { + // TODO: better docs + + /// Allocate the required amount of slots used for validation fn allocate_slots(&mut self, slot_count: usize); - fn update_validity(&mut self); + + /// Invoked once per Read/Write so internal state can be updated if necessary. + /// + /// This is an optimization so things like `std::time::Instant` only need to be computed once. + fn update_validity(&mut self) { + // no-op + } + + /// Checks wether or not the given slot is valid. fn is_slot_valid(&self, slot_id: usize) -> bool; + + /// Callback from the cache implementation when a page is cached + /// and the slot should become valid. fn validate_slot(&mut self, slot_id: usize); + + /// Callback from the caching implementation to mark a slot as invalid. + /// + /// This can happen if two different cache entries fall into the same slot id. fn invalidate_slot(&mut self, slot_id: usize); } diff --git a/apex_dma/memflow_lib/memflow/src/mem/cache/timed_validator.rs b/apex_dma/memflow_lib/memflow/src/types/cache/timed_validator.rs similarity index 71% rename from apex_dma/memflow_lib/memflow/src/mem/cache/timed_validator.rs rename to apex_dma/memflow_lib/memflow/src/types/cache/timed_validator.rs index 6850b74..3a7deb7 100644 --- a/apex_dma/memflow_lib/memflow/src/mem/cache/timed_validator.rs +++ b/apex_dma/memflow_lib/memflow/src/types/cache/timed_validator.rs @@ -1,14 +1,13 @@ -/*! -Validators are used when working with caches and determine for how long -a specific cache entry stays valid. +//! Validators are used when working with caches and determine for how long +//! a specific cache entry stays valid. +//! +//! This validator limits the cache time based on an actual time instant. +//! Internally it uses the [coarsetime](https://docs.rs/coarsetime/0.1.14/coarsetime/) crate as a less +//! 
computation intensive alternative for [std::time](https://doc.rust-lang.org/std/time/index.html). +//! Therefor the Duration has to be converted (e.g. via the .into() trait) when constructing this validator. +//! +//! The default implementation will set the cache time to 1 second. -This validator limits the cache time based on an actual time instant. -Internally it uses the [coarsetime](https://docs.rs/coarsetime/0.1.14/coarsetime/) crate as a less -computation intensive alternative for [std::time](https://doc.rust-lang.org/std/time/index.html). -Therefor the Duration has to be converted (e.g. via the .into() trait) when constructing this validator. - -The default implementation will set the cache time to 1 second. -*/ use std::prelude::v1::*; use super::CacheValidator; @@ -41,7 +40,7 @@ impl TimedCacheValidator { /// # Examples: /// ``` /// use std::time::Duration; - /// use memflow::mem::TimedCacheValidator; + /// use memflow::types::cache::TimedCacheValidator; /// /// let _ = TimedCacheValidator::new(Duration::from_millis(5000).into()); /// ``` @@ -57,6 +56,7 @@ impl TimedCacheValidator { impl CacheValidator for TimedCacheValidator { #[inline] fn allocate_slots(&mut self, slot_count: usize) { + // allocate all slots in an invalid state self.time .resize(slot_count, self.last_time - self.valid_time); } @@ -78,6 +78,7 @@ impl CacheValidator for TimedCacheValidator { #[inline] fn invalidate_slot(&mut self, slot_id: usize) { + // set this slot to invalid self.time[slot_id] = self.last_time - self.valid_time } } diff --git a/apex_dma/memflow_lib/memflow/src/types/gap_remover.rs b/apex_dma/memflow_lib/memflow/src/types/gap_remover.rs new file mode 100644 index 0000000..d980106 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/types/gap_remover.rs @@ -0,0 +1,74 @@ +use crate::mem::mem_data::{MemoryRange, MemoryRangeCallback}; +use crate::types::{imem, umem, Address, PageType}; +use cglue::prelude::v1::*; +use std::prelude::v1::*; + +pub struct GapRemover<'a> { + 
map: rangemap::RangeMap, + out: Option>, + gap_size: imem, + start: Address, + end: Address, +} + +impl<'a> GapRemover<'a> { + pub fn new(out: MemoryRangeCallback<'a>, gap_size: imem, start: Address, end: Address) -> Self { + Self { + map: Default::default(), + out: Some(out), + gap_size, + start, + end, + } + } + + pub fn push_range(&mut self, CTup3(in_virtual, size, page_type): MemoryRange) { + self.map.insert(in_virtual..(in_virtual + size), page_type); + } +} + +impl<'a> Drop for GapRemover<'a> { + fn drop(&mut self) { + self.map + .gaps(&(self.start..self.end)) + .filter_map(|r| { + assert!(r.end >= r.start); + if self.gap_size >= 0 && (r.end - r.start) as umem <= self.gap_size as umem { + if r.start.to_umem() > 0 { + let next = r.end; + let prev = r.start - 1 as umem; + match (self.map.get(&prev), self.map.get(&next)) { + (Some(p1), Some(p2)) if p1 == p2 => Some((r, *p2)), + _ => None, + } + } else { + None + } + } else { + None + } + }) + .collect::>() + .into_iter() + .for_each(|(r, p)| self.map.insert(r, p)); + + self.map + .iter() + .map(|(r, p)| { + let address = r.start; + assert!(r.end >= address); + let size = r.end - address; + CTup3(address, size as umem, *p) + }) + .feed_into(self.out.take().unwrap()); + } +} + +impl<'a> Extend for GapRemover<'a> { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + iter.into_iter().for_each(|r| self.push_range(r)); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/types/mem.rs b/apex_dma/memflow_lib/memflow/src/types/mem.rs new file mode 100644 index 0000000..ab45aeb --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/types/mem.rs @@ -0,0 +1,7 @@ +//! Common type aliases for cross-platform introspection + +/// Unsigned memory type. This type is guaranteed to hold a whole address of introspection target. +pub type umem = u64; + +/// Signed memory type. This type is useful for address differences. 
+pub type imem = i64; diff --git a/apex_dma/memflow_lib/memflow/src/types/mem_units.rs b/apex_dma/memflow_lib/memflow/src/types/mem_units.rs new file mode 100644 index 0000000..80ad229 --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/types/mem_units.rs @@ -0,0 +1,72 @@ +//! This module contains helper functions for creating various byte sizes. +//! All function are const and will be [optimized](https://rust.godbolt.org/z/T6LiwJ) by rustc. +use super::{imem, umem}; + +#[macro_export] +macro_rules! impl_unit_helper { + ($mod_name:ident, $type_name:ident) => { + pub mod $mod_name { + + pub use super::*; + + /// Returns a umem representing the length in bytes from the given number of kilobytes. + pub const fn kb(kb: $type_name) -> $type_name { + kb * 1024 + } + + /// Returns a $type_name representing the length in bytes from the given number of kilobits. + pub const fn kib(kib: $type_name) -> $type_name { + kb(kib) / 8 + } + + /// Returns a $type_name representing the length in bytes from the given number of megabytes. + pub const fn mb(mb: $type_name) -> $type_name { + kb(mb) * 1024 + } + + /// Returns a $type_name representing the length in bytes from the given number of megabits. + pub const fn mib(mib: $type_name) -> $type_name { + mb(mib) / 8 + } + + /// Returns a $type_name representing the length in bytes from the given number of gigabytes. + pub const fn gb(gb: $type_name) -> $type_name { + mb(gb) * 1024 + } + + /// Returns a $type_name representing the length in bytes from the given number of gigabits. 
+ pub const fn gib(gib: $type_name) -> $type_name { + gb(gib) / 8 + } + } + }; +} + +impl_unit_helper!(size, usize); +impl_unit_helper!(mem, umem); +impl_unit_helper!(ssize, isize); +impl_unit_helper!(smem, imem); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from() { + assert_eq!(size::kb(20), 20480); + assert_eq!(size::kib(123), 15744); + assert_eq!(size::mb(20), 20_971_520); + assert_eq!(size::mib(52), 6_815_744); + assert_eq!(size::gb(2), 2_147_483_648); + #[cfg(pointer_width = "64")] + { + assert_eq!(size::gb(20), 21_474_836_480); + assert_eq!(size::gib(52), 6_979_321_856); + } + #[cfg(feature = "64_bit_mem")] + { + assert_eq!(mem::gb(20), 21_474_836_480); + assert_eq!(mem::gib(52), 6_979_321_856); + } + } +} diff --git a/apex_dma/memflow_lib/memflow/src/types/mod.rs b/apex_dma/memflow_lib/memflow/src/types/mod.rs index 3efcba0..2e3192e 100644 --- a/apex_dma/memflow_lib/memflow/src/types/mod.rs +++ b/apex_dma/memflow_lib/memflow/src/types/mod.rs @@ -1,33 +1,30 @@ -/*! -Module with basic types used in memflow. - -This module contains types for handling virtual and physical addresses. -It also contains types for handling pointers, pages and -it exposes different size helpers. -*/ +//! Module with basic types used in memflow. +//! +//! This module contains types for handling virtual and physical addresses. +//! It also contains types for handling pointers, pages and +//! it exposes different size helpers. 
pub mod address; -#[doc(hidden)] -pub use address::Address; +pub use address::{ + clamp_to_isize, clamp_to_usize, imem, umem, Address, PrimitiveAddress, UMEM_BITS, +}; -pub mod size; +mod mem_units; +pub use mem_units::*; pub mod page; -#[doc(hidden)] pub use page::{Page, PageType}; pub mod physical_address; -#[doc(hidden)] pub use physical_address::PhysicalAddress; -pub mod pointer32; -#[doc(hidden)] -pub use pointer32::Pointer32; - -pub mod pointer64; -#[doc(hidden)] -pub use pointer64::Pointer64; +pub mod pointer; +pub use pointer::{Pointer, Pointer32, Pointer64}; pub mod byte_swap; -#[doc(hidden)] pub use byte_swap::ByteSwap; + +pub mod cache; +pub use cache::{CacheValidator, DefaultCacheValidator}; + +pub mod gap_remover; diff --git a/apex_dma/memflow_lib/memflow/src/types/page.rs b/apex_dma/memflow_lib/memflow/src/types/page.rs index 77af8fe..23aba54 100644 --- a/apex_dma/memflow_lib/memflow/src/types/page.rs +++ b/apex_dma/memflow_lib/memflow/src/types/page.rs @@ -2,12 +2,13 @@ This module contains data structures related to information about a page. */ -use super::Address; +use super::{umem, Address}; bitflags! { /// Describes the type of a page using a bitflag. - #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] #[repr(transparent)] + #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] + #[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] pub struct PageType: u8 { /// The page explicitly has no flags. const NONE = 0b0000_0000; @@ -62,14 +63,17 @@ impl Default for PageType { /// A `Page` holds information about a memory page. /// /// More information about paging can be found [here](https://en.wikipedia.org/wiki/Paging). 
+#[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] pub struct Page { /// Contains the page type (see above). pub page_type: PageType, /// Contains the base address of this page. pub page_base: Address, /// Contains the size of this page. - pub page_size: usize, + pub page_size: umem, } impl Page { diff --git a/apex_dma/memflow_lib/memflow/src/types/physical_address.rs b/apex_dma/memflow_lib/memflow/src/types/physical_address.rs index 0b37382..ea8363d 100644 --- a/apex_dma/memflow_lib/memflow/src/types/physical_address.rs +++ b/apex_dma/memflow_lib/memflow/src/types/physical_address.rs @@ -2,7 +2,7 @@ Abstraction over a physical address with optional page information. */ -use super::{Address, Page, PageType}; +use super::{umem, Address, Page, PageType, Pointer, PrimitiveAddress}; use std::fmt; @@ -15,61 +15,16 @@ use std::fmt; /// /// Most architectures have support multiple page sizes (see [huge pages](todo.html)) /// which will be represented by the containing `page` of the `PhysicalAddress` struct. +#[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))] -#[repr(C)] +#[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))] pub struct PhysicalAddress { - address: Address, - page_type: PageType, + pub address: Address, + pub page_type: PageType, page_size_log2: u8, } -/// Converts a `Address` into a `PhysicalAddress` with no page information attached. -impl From
for PhysicalAddress { - fn from(address: Address) -> Self { - Self { - address, - page_type: PageType::UNKNOWN, - page_size_log2: 0, - } - } -} - -/// Constructs an `PhysicalAddress` from a `i32` value. -impl From for PhysicalAddress { - fn from(item: i32) -> Self { - Self::from(Address::from(item)) - } -} - -/// Constructs an `PhysicalAddress` from a `u32` value. -impl From for PhysicalAddress { - fn from(item: u32) -> Self { - Self::from(Address::from(item)) - } -} - -/// Constructs an `PhysicalAddress` from a `u64` value. -impl From for PhysicalAddress { - fn from(item: u64) -> Self { - Self::from(Address::from(item)) - } -} - -/// Constructs an `PhysicalAddress` from a `usize` value. -impl From for PhysicalAddress { - fn from(item: usize) -> Self { - Self::from(Address::from(item)) - } -} - -/// Converts a `PhysicalAddress` into a `Address`. -impl From for Address { - fn from(address: PhysicalAddress) -> Self { - Self::from(address.address.as_u64()) - } -} - impl PhysicalAddress { /// A physical address with a value of zero. pub const NULL: PhysicalAddress = PhysicalAddress { @@ -97,16 +52,15 @@ impl PhysicalAddress { /// /// Note: The page size must be a power of 2. #[inline] - pub fn with_page(address: Address, page_type: PageType, page_size: usize) -> Self { + pub fn with_page(address: Address, page_type: PageType, page_size: umem) -> Self { Self { address, page_type, // TODO: this should be replaced by rust's internal functions as this is not endian aware // once it is stabilizied in rust // see issue: https://github.com/rust-lang/rust/issues/70887 - page_size_log2: (std::mem::size_of::() * 8 - - (page_size as u64).to_le().leading_zeros() as usize) - as u8 + page_size_log2: (std::mem::size_of::() * 8 + - page_size.to_le().leading_zeros() as usize) as u8 - 2, } } @@ -149,8 +103,8 @@ impl PhysicalAddress { /// Returns the size of the page this physical address is contained in. 
#[inline] - pub fn page_size(&self) -> usize { - (2 << self.page_size_log2) as usize + pub fn page_size(&self) -> umem { + (2 << self.page_size_log2) as umem } /// Returns the base address of the containing page. @@ -158,7 +112,7 @@ impl PhysicalAddress { if !self.has_page() { Address::INVALID } else { - self.address.as_page_aligned(self.page_size()) + self.address.as_mem_aligned(self.page_size()) } } @@ -172,22 +126,10 @@ impl PhysicalAddress { } } - /// Returns the containing address converted to a u32. - #[inline] - pub const fn as_u32(&self) -> u32 { - self.address.as_u32() - } - - /// Returns the internal u64 value of the address. - #[inline] - pub const fn as_u64(&self) -> u64 { - self.address.as_u64() - } - - /// Returns the containing address converted to a usize. + /// Returns the containing address converted to a raw [`umem`]. #[inline] - pub const fn as_usize(&self) -> usize { - self.address.as_usize() + pub const fn to_umem(self) -> umem { + self.address.to_umem() } } @@ -198,6 +140,76 @@ impl Default for PhysicalAddress { } } +#[macro_export] +macro_rules! impl_physical_address_from { + ($type_name:ident) => { + impl From<$type_name> for PhysicalAddress { + fn from(item: $type_name) -> Self { + Self { + address: (item as umem).into(), + page_type: PageType::UNKNOWN, + page_size_log2: 0, + } + } + } + + impl From> for PhysicalAddress { + #[inline(always)] + fn from(ptr: Pointer<$type_name, T>) -> Self { + Self { + address: (ptr.inner as umem).into(), + page_type: PageType::UNKNOWN, + page_size_log2: 0, + } + } + } + }; +} + +// u16, u32, u64 is handled by the PrimitiveAddress implementation below. 
+impl_physical_address_from!(i8); +impl_physical_address_from!(u8); +impl_physical_address_from!(i16); +//impl_physical_address_from!(u16); +impl_physical_address_from!(i32); +//impl_physical_address_from!(u32); +impl_physical_address_from!(i64); +//impl_physical_address_from!(u64); +impl_physical_address_from!(usize); + +impl From for PhysicalAddress { + #[inline(always)] + fn from(val: U) -> Self { + Self { + address: val.to_umem().into(), + page_type: PageType::UNKNOWN, + page_size_log2: 0, + } + } +} + +impl From> for PhysicalAddress { + #[inline(always)] + fn from(ptr: Pointer) -> Self { + Self { + address: ptr.inner.to_umem().into(), + page_type: PageType::UNKNOWN, + page_size_log2: 0, + } + } +} + +/// Converts a `PhysicalAddress` into a `Address`. +impl From
for PhysicalAddress { + fn from(address: Address) -> Self { + Self { + address, + page_type: PageType::UNKNOWN, + page_size_log2: 0, + } + } +} + impl fmt::Debug for PhysicalAddress { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:x}", self.address) @@ -223,22 +235,22 @@ impl fmt::Display for PhysicalAddress { #[cfg(test)] mod tests { - use super::super::size; + use super::super::mem; use super::*; #[test] fn test_page_size() { - let pa = PhysicalAddress::with_page(Address::from(0x1234), PageType::UNKNOWN, 0x1000); + let pa = PhysicalAddress::with_page(Address::from(0x1234_u64), PageType::UNKNOWN, 0x1000); assert_eq!(pa.page_size(), 0x1000); - assert_eq!(pa.page_base(), Address::from(0x1000)); + assert_eq!(pa.page_base(), Address::from(0x1000_u64)); } #[test] fn test_page_size_invalid() { - let pa_42 = PhysicalAddress::with_page(Address::from(0x1234), PageType::UNKNOWN, 42); + let pa_42 = PhysicalAddress::with_page(Address::from(0x1234_u64), PageType::UNKNOWN, 42); assert_ne!(pa_42.page_size(), 42); - let pa_0 = PhysicalAddress::with_page(Address::from(0x1234), PageType::UNKNOWN, 42); + let pa_0 = PhysicalAddress::with_page(Address::from(0x1234_u64), PageType::UNKNOWN, 42); assert_ne!(pa_0.page_size(), 0); } @@ -246,13 +258,13 @@ mod tests { #[allow(clippy::unreadable_literal)] fn test_page_size_huge() { let pa_2mb = - PhysicalAddress::with_page(Address::from(0x123456), PageType::UNKNOWN, size::mb(2)); - assert_eq!(pa_2mb.page_size(), size::mb(2)); - assert_eq!(pa_2mb.page_base(), Address::from(0)); + PhysicalAddress::with_page(Address::from(0x123456_u64), PageType::UNKNOWN, mem::mb(2)); + assert_eq!(pa_2mb.page_size(), mem::mb(2)); + assert_eq!(pa_2mb.page_base(), Address::from(0_u64)); let pa_1gb = - PhysicalAddress::with_page(Address::from(0x1234567), PageType::UNKNOWN, size::gb(1)); - assert_eq!(pa_1gb.page_size(), size::gb(1)); - assert_eq!(pa_1gb.page_base(), Address::from(0)); + PhysicalAddress::with_page(Address::from(0x1234567_u64), 
PageType::UNKNOWN, mem::gb(1)); + assert_eq!(pa_1gb.page_size(), mem::gb(1)); + assert_eq!(pa_1gb.page_base(), Address::from(0_u64)); } } diff --git a/apex_dma/memflow_lib/memflow/src/types/pointer.rs b/apex_dma/memflow_lib/memflow/src/types/pointer.rs new file mode 100644 index 0000000..7b0830d --- /dev/null +++ b/apex_dma/memflow_lib/memflow/src/types/pointer.rs @@ -0,0 +1,539 @@ +/*! +Pointer abstraction. +*/ + +use crate::cglue::ReprCString; +use crate::dataview::Pod; +use crate::error::{PartialError, PartialResult}; +use crate::mem::MemoryView; +use crate::types::{imem, umem, Address, ByteSwap, PrimitiveAddress}; + +use std::convert::TryInto; +use std::marker::PhantomData; +use std::mem::size_of; +use std::{cmp, fmt, hash, ops}; + +pub type Pointer32 = Pointer; +pub type Pointer64 = Pointer; + +const _: [(); std::mem::size_of::>()] = [(); std::mem::size_of::()]; +const _: [(); std::mem::size_of::>()] = [(); std::mem::size_of::()]; + +/// This type can be used in structs that are being read from the target memory. +/// It holds a phantom type that can be used to describe the proper type of the pointer +/// and to read it in a more convenient way. +/// +/// This module is a direct adaption of [CasualX's great IntPtr crate](https://github.com/CasualX/intptr). +/// +/// Generally the generic Type should implement the Pod trait to be read into easily. +/// See [here](https://docs.rs/dataview/0.1.1/dataview/) for more information on the Pod trait. 
+/// +/// # Examples +/// +/// ``` +/// use memflow::types::Pointer64; +/// use memflow::mem::MemoryView; +/// use memflow::dataview::Pod; +/// +/// #[repr(C)] +/// #[derive(Clone, Debug, Pod)] +/// struct Foo { +/// pub some_value: i64, +/// } +/// +/// #[repr(C)] +/// #[derive(Clone, Debug, Pod)] +/// struct Bar { +/// pub foo_ptr: Pointer64, +/// } +/// +/// fn read_foo_bar(mem: &mut impl MemoryView) { +/// let bar: Bar = mem.read(0x1234.into()).unwrap(); +/// let foo = bar.foo_ptr.read(mem).unwrap(); +/// println!("value: {}", foo.some_value); +/// } +/// +/// # use memflow::types::size; +/// # use memflow::dummy::DummyOs; +/// # use memflow::os::Process; +/// # read_foo_bar(&mut DummyOs::quick_process(size::mb(2), &[])); +/// ``` +/// +/// ``` +/// use memflow::types::Pointer64; +/// use memflow::mem::MemoryView; +/// use memflow::dataview::Pod; +/// +/// #[repr(C)] +/// #[derive(Clone, Debug, Pod)] +/// struct Foo { +/// pub some_value: i64, +/// } +/// +/// #[repr(C)] +/// #[derive(Clone, Debug, Pod)] +/// struct Bar { +/// pub foo_ptr: Pointer64, +/// } +/// +/// fn read_foo_bar(mem: &mut impl MemoryView) { +/// let bar: Bar = mem.read(0x1234.into()).unwrap(); +/// let foo = mem.read_ptr(bar.foo_ptr).unwrap(); +/// println!("value: {}", foo.some_value); +/// } +/// +/// # use memflow::dummy::DummyOs; +/// # use memflow::os::Process; +/// # use memflow::types::size; +/// # read_foo_bar(&mut DummyOs::quick_process(size::mb(2), &[])); +/// ``` +#[repr(transparent)] +#[cfg_attr(feature = "serde", derive(::serde::Serialize))] +pub struct Pointer { + pub inner: U, + phantom_data: PhantomData T>, +} +unsafe impl Pod for Pointer {} + +impl Pointer { + const PHANTOM_DATA: PhantomData T> = PhantomData; + + /// Returns a pointer64 with a value of zero. 
+ /// + /// # Examples + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// println!("pointer: {}", Pointer64::<()>::null()); + /// ``` + #[inline] + pub fn null() -> Self { + Pointer { + inner: U::null(), + phantom_data: PhantomData, + } + } + + /// Returns `true` if the pointer64 is null. + /// + /// # Examples + /// + /// ``` + /// use memflow::types::Pointer32; + /// + /// let ptr = Pointer32::<()>::from(0x1000u32); + /// assert!(!ptr.is_null()); + /// ``` + #[inline] + pub fn is_null(self) -> bool { + self.inner.is_null() + } + + /// Converts the pointer64 to an Option that is None when it is null + /// + /// # Examples + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// assert_eq!(Pointer64::<()>::null().non_null(), None); + /// assert_eq!(Pointer64::<()>::from(0x1000u64).non_null(), Some(Pointer64::from(0x1000u64))); + /// ``` + #[inline] + pub fn non_null(self) -> Option> { + if self.is_null() { + None + } else { + Some(self) + } + } + + /// Converts the pointer into a raw `umem` value. + /// + /// # Examples + /// + /// ``` + /// use memflow::types::{Pointer64, umem}; + /// + /// let ptr = Pointer64::<()>::from(0x1000u64); + /// let ptr_umem: umem = ptr.to_umem(); + /// assert_eq!(ptr_umem, 0x1000); + /// ``` + #[inline] + pub fn to_umem(self) -> umem { + self.inner.to_umem() + } + + // Returns the address this pointer holds. + #[inline] + pub fn address(&self) -> Address { + Address::from(self.inner) + } +} + +impl Pointer { + /// Calculates the offset from a pointer64 + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer offset of `3 * size_of::()` bytes. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). + /// This function also panics when `offset * size_of::()` + /// causes overflow of a signed 64-bit integer. 
+ /// + /// # Examples: + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// let ptr = Pointer64::::from(0x1000u64); + /// + /// println!("{:?}", ptr.offset(3)); + /// ``` + pub fn offset(self, count: imem) -> Self { + let pointee_size = U::from_umem(size_of::() as umem); + assert!(U::null() < pointee_size && pointee_size <= PrimitiveAddress::max()); + + if count >= 0 { + self.inner + .wrapping_add(U::from_umem(pointee_size.to_umem() * count as umem)) + .into() + } else { + self.inner + .wrapping_sub(U::from_umem(pointee_size.to_umem() * (-count) as umem)) + .into() + } + } + + /// Calculates the distance between two pointers. The returned value is in + /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// + /// This function is the inverse of [`offset`]. + /// + /// [`offset`]: #method.offset + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). + /// + /// # Examples: + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// let ptr1 = Pointer64::::from(0x1000u64); + /// let ptr2 = Pointer64::::from(0x1008u64); + /// + /// assert_eq!(ptr2.offset_from(ptr1), 4); + /// assert_eq!(ptr1.offset_from(ptr2), -4); + /// ``` + pub fn offset_from(self, origin: Self) -> imem { + let pointee_size: imem = size_of::().try_into().unwrap(); + let offset = self.inner.to_imem().wrapping_sub(origin.inner.to_imem()); + offset / pointee_size as imem + } + + /// Calculates the offset from a pointer (convenience for `.offset(count as i64)`). + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// let ptr = Pointer64::::from(0x1000u64); + /// + /// println!("{:?}", ptr.add(3)); + /// ``` + #[allow(clippy::should_implement_trait)] + pub fn add(self, count: umem) -> Self { + self.offset(count as imem) + } + + /// Calculates the offset from a pointer (convenience for + /// `.offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use memflow::types::Pointer64; + /// + /// let ptr = Pointer64::::from(0x1000u64); + /// + /// println!("{:?}", ptr.sub(3)); + /// ``` + #[allow(clippy::should_implement_trait)] + pub fn sub(self, count: umem) -> Self { + self.offset((count as imem).wrapping_neg()) + } +} + +/// Implement special phys/virt read/write for Pod types +impl Pointer { + pub fn read_into(self, mem: &mut M, out: &mut T) -> PartialResult<()> { + mem.read_ptr_into(self, out) + } +} + +impl Pointer { + pub fn read(self, mem: &mut M) -> PartialResult { + mem.read_ptr(self) + } + + pub fn write(self, mem: &mut M, data: &T) -> PartialResult<()> { + mem.write_ptr(self, data) + } +} + +/// Implement special phys/virt read/write for CReprStr +impl Pointer { + pub fn read_string(self, mem: &mut M) -> PartialResult { + match mem.read_char_string(self.inner.to_umem().into()) { + Ok(s) => Ok(s.into()), + Err(PartialError::Error(e)) => Err(PartialError::Error(e)), + Err(PartialError::PartialVirtualRead(s)) => { + Err(PartialError::PartialVirtualRead(s.into())) + } + Err(PartialError::PartialVirtualWrite(s)) => { + Err(PartialError::PartialVirtualWrite(s.into())) + } + } + } +} + +impl Pointer { + pub fn decay(self) -> Pointer { + Pointer { + inner: self.inner, + phantom_data: 
Pointer::::PHANTOM_DATA, + } + } + + pub fn at(self, i: umem) -> Pointer { + let inner = self + .inner + .wrapping_add(U::from_umem(size_of::() as umem * i)); + Pointer { + inner, + phantom_data: Pointer::::PHANTOM_DATA, + } + } +} + +impl Copy for Pointer {} +impl Clone for Pointer { + #[inline(always)] + fn clone(&self) -> Pointer { + *self + } +} +impl Default for Pointer { + #[inline(always)] + fn default() -> Pointer { + Pointer::null() + } +} +impl Eq for Pointer {} +impl PartialEq for Pointer { + #[inline(always)] + fn eq(&self, rhs: &Pointer) -> bool { + self.inner == rhs.inner + } +} +impl PartialOrd for Pointer { + #[inline(always)] + fn partial_cmp(&self, rhs: &Pointer) -> Option { + Some(self.cmp(rhs)) + } +} +impl Ord for Pointer { + #[inline(always)] + fn cmp(&self, rhs: &Pointer) -> cmp::Ordering { + self.inner.cmp(&rhs.inner) + } +} +impl hash::Hash for Pointer { + #[inline(always)] + fn hash(&self, state: &mut H) { + self.inner.hash(state) + } +} +impl AsRef for Pointer { + #[inline(always)] + fn as_ref(&self) -> &U { + &self.inner + } +} +impl AsMut for Pointer { + #[inline(always)] + fn as_mut(&mut self) -> &mut U { + &mut self.inner + } +} + +// From implementations +impl From for Pointer { + #[inline(always)] + fn from(address: U) -> Pointer { + Pointer { + inner: address, + phantom_data: PhantomData, + } + } +} + +impl From
for Pointer64 { + #[inline(always)] + fn from(address: Address) -> Pointer64 { + Pointer { + inner: address.to_umem() as u64, + phantom_data: PhantomData, + } + } +} + +// Into implementations +impl, T: ?Sized> From> for umem { + #[inline(always)] + fn from(ptr: Pointer) -> umem { + let address: Address = ptr.inner.into(); + address.to_umem() + } +} + +// Arithmetic operations +impl ops::Add for Pointer { + type Output = Pointer; + #[inline(always)] + fn add(self, other: umem) -> Pointer { + let address = self.inner + U::from_umem(size_of::() as umem * other); + Pointer { + inner: address, + phantom_data: self.phantom_data, + } + } +} +impl ops::Sub for Pointer { + type Output = Pointer; + #[inline(always)] + fn sub(self, other: umem) -> Pointer { + let address = self.inner - U::from_umem(size_of::() as umem * other); + Pointer { + inner: address, + phantom_data: self.phantom_data, + } + } +} + +#[cfg(feature = "64_bit_mem")] +impl ops::Add for Pointer { + type Output = Pointer; + #[inline(always)] + fn add(self, other: usize) -> Pointer { + self + other as umem + } +} +#[cfg(feature = "64_bit_mem")] +impl ops::Sub for Pointer { + type Output = Pointer; + #[inline(always)] + fn sub(self, other: usize) -> Pointer { + self - other as umem + } +} + +impl fmt::Debug for Pointer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:x}", self.inner) + } +} +impl fmt::UpperHex for Pointer { + #[inline(always)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:X}", self.inner) + } +} +impl fmt::LowerHex for Pointer { + #[inline(always)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:x}", self.inner) + } +} +impl fmt::Display for Pointer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:x}", self.inner) + } +} + +impl ByteSwap for Pointer { + fn byte_swap(&mut self) { + self.inner.byte_swap(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn offset32() { + let ptr8 
= Pointer32::::from(0x1000u32); + assert_eq!(ptr8.offset(3).to_umem(), 0x1003); + assert_eq!(ptr8.offset(-5).to_umem(), 0xFFB); + + let ptr16 = Pointer32::::from(0x1000u32); + assert_eq!(ptr16.offset(3).to_umem(), 0x1006); + assert_eq!(ptr16.offset(-5).to_umem(), 0xFF6); + + let ptr32 = Pointer32::::from(0x1000u32); + assert_eq!(ptr32.offset(3).to_umem(), 0x100C); + assert_eq!(ptr32.offset(-5).to_umem(), 0xFEC); + } + + #[test] + fn offset64() { + let ptr8 = Pointer64::::from(0x1000u64); + assert_eq!(ptr8.offset(3).to_umem(), 0x1003); + assert_eq!(ptr8.offset(-5).to_umem(), 0xFFB); + + let ptr16 = Pointer64::::from(0x1000u64); + assert_eq!(ptr16.offset(3).to_umem(), 0x1006); + assert_eq!(ptr16.offset(-5).to_umem(), 0xFF6); + + let ptr32 = Pointer64::::from(0x1000u64); + assert_eq!(ptr32.offset(3).to_umem(), 0x100C); + assert_eq!(ptr32.offset(-5).to_umem(), 0xFEC); + + let ptr64 = Pointer64::::from(0x1000u64); + assert_eq!(ptr64.offset(3).to_umem(), 0x1018); + assert_eq!(ptr64.offset(-5).to_umem(), 0xFD8); + } + + #[test] + fn offset_from() { + let ptr1 = Pointer64::::from(0x1000u64); + let ptr2 = Pointer64::::from(0x1008u64); + + assert_eq!(ptr2.offset_from(ptr1), 4); + assert_eq!(ptr1.offset_from(ptr2), -4); + } +} diff --git a/apex_dma/memflow_lib/memflow/src/types/pointer32.rs b/apex_dma/memflow_lib/memflow/src/types/pointer32.rs deleted file mode 100644 index 3de9af6..0000000 --- a/apex_dma/memflow_lib/memflow/src/types/pointer32.rs +++ /dev/null @@ -1,270 +0,0 @@ -/*! -32-bit Pointer abstraction. -*/ - -use crate::error::PartialResult; -use crate::mem::VirtualMemory; -use crate::types::{Address, ByteSwap}; - -use std::marker::PhantomData; -use std::mem::size_of; -use std::{cmp, fmt, hash, ops}; - -use dataview::Pod; - -/// This type can be used in structs that are being read from the target memory. -/// It holds a phantom type that can be used to describe the proper type of the pointer -/// and to read it in a more convenient way. 
-/// -/// This module is a direct adaption of [CasualX's great IntPtr crate](https://github.com/CasualX/intptr). -/// -/// Generally the generic Type should implement the Pod trait to be read into easily. -/// See [here](https://docs.rs/dataview/0.1.1/dataview/) for more information on the Pod trait. -/// -/// # Examples -/// -/// ``` -/// use memflow::types::Pointer32; -/// use memflow::mem::VirtualMemory; -/// use dataview::Pod; -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Foo { -/// pub some_value: i32, -/// } -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Bar { -/// pub foo_ptr: Pointer32, -/// } -/// -/// fn read_foo_bar(virt_mem: &mut T) { -/// let bar: Bar = virt_mem.virt_read(0x1234.into()).unwrap(); -/// let foo = bar.foo_ptr.deref(virt_mem).unwrap(); -/// println!("value: {}", foo.some_value); -/// } -/// -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # read_foo_bar(&mut DummyMemory::new_virt(size::mb(4), size::mb(2), &[]).0); -/// -/// ``` -/// -/// ``` -/// use memflow::types::Pointer32; -/// use memflow::mem::VirtualMemory; -/// use dataview::Pod; -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Foo { -/// pub some_value: i32, -/// } -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Bar { -/// pub foo_ptr: Pointer32, -/// } -/// -/// fn read_foo_bar(virt_mem: &mut T) { -/// let bar: Bar = virt_mem.virt_read(0x1234.into()).unwrap(); -/// let foo = virt_mem.virt_read_ptr32(bar.foo_ptr).unwrap(); -/// println!("value: {}", foo.some_value); -/// } -/// -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # read_foo_bar(&mut DummyMemory::new_virt(size::mb(4), size::mb(2), &[]).0); -/// ``` -#[repr(transparent)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Pointer32 { - pub address: u32, - phantom_data: PhantomData T>, -} - -impl Pointer32 { - const PHANTOM_DATA: PhantomData T> = 
PhantomData; - - /// A pointer with a value of zero. - pub const NULL: Pointer32 = Pointer32 { - address: 0, - phantom_data: PhantomData, - }; - - /// Returns a pointer with a value of zero. - pub fn null() -> Self { - Pointer32::NULL - } - - /// Checks wether the containing value of this pointer is zero. - pub fn is_null(self) -> bool { - self.address == 0 - } - - /// Returns the underlying raw u32 value of this pointer. - pub const fn into_raw(self) -> u32 { - self.address - } -} - -/// This function will deref the pointer directly into a Pod type. -impl Pointer32 { - pub fn deref_into(self, mem: &mut U, out: &mut T) -> PartialResult<()> { - mem.virt_read_ptr32_into(self, out) - } -} - -/// This function will return the Object this pointer is pointing towards. -impl Pointer32 { - pub fn deref(self, mem: &mut U) -> PartialResult { - mem.virt_read_ptr32(self) - } -} - -impl Pointer32<[T]> { - pub const fn decay(self) -> Pointer32 { - Pointer32 { - address: self.address, - phantom_data: Pointer32::::PHANTOM_DATA, - } - } - - pub const fn at(self, i: usize) -> Pointer32 { - let address = self.address + (i * size_of::()) as u32; - Pointer32 { - address, - phantom_data: Pointer32::::PHANTOM_DATA, - } - } -} - -impl Copy for Pointer32 {} -impl Clone for Pointer32 { - #[inline(always)] - fn clone(&self) -> Pointer32 { - *self - } -} -impl Default for Pointer32 { - #[inline(always)] - fn default() -> Pointer32 { - Pointer32::NULL - } -} -impl Eq for Pointer32 {} -impl PartialEq for Pointer32 { - #[inline(always)] - fn eq(&self, rhs: &Pointer32) -> bool { - self.address == rhs.address - } -} -impl PartialOrd for Pointer32 { - #[inline(always)] - fn partial_cmp(&self, rhs: &Pointer32) -> Option { - self.address.partial_cmp(&rhs.address) - } -} -impl Ord for Pointer32 { - #[inline(always)] - fn cmp(&self, rhs: &Pointer32) -> cmp::Ordering { - self.address.cmp(&rhs.address) - } -} -impl hash::Hash for Pointer32 { - #[inline(always)] - fn hash(&self, state: &mut H) { - 
self.address.hash(state) - } -} -impl AsRef for Pointer32 { - #[inline(always)] - fn as_ref(&self) -> &u32 { - &self.address - } -} -impl AsMut for Pointer32 { - #[inline(always)] - fn as_mut(&mut self) -> &mut u32 { - &mut self.address - } -} - -impl From for Pointer32 { - #[inline(always)] - fn from(address: u32) -> Pointer32 { - Pointer32 { - address, - phantom_data: PhantomData, - } - } -} -impl From> for Address { - #[inline(always)] - fn from(ptr: Pointer32) -> Address { - ptr.address.into() - } -} -impl From> for u32 { - #[inline(always)] - fn from(ptr: Pointer32) -> u32 { - ptr.address - } -} - -impl ops::Add for Pointer32 { - type Output = Pointer32; - #[inline(always)] - fn add(self, other: usize) -> Pointer32 { - let address = self.address + (other * size_of::()) as u32; - Pointer32 { - address, - phantom_data: self.phantom_data, - } - } -} -impl ops::Sub for Pointer32 { - type Output = Pointer32; - #[inline(always)] - fn sub(self, other: usize) -> Pointer32 { - let address = self.address - (other * size_of::()) as u32; - Pointer32 { - address, - phantom_data: self.phantom_data, - } - } -} - -impl fmt::Debug for Pointer32 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} -impl fmt::UpperHex for Pointer32 { - #[inline(always)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:X}", self.address) - } -} -impl fmt::LowerHex for Pointer32 { - #[inline(always)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} -impl fmt::Display for Pointer32 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} - -unsafe impl Pod for Pointer32 {} - -impl ByteSwap for Pointer32 { - fn byte_swap(&mut self) { - self.address.byte_swap(); - } -} diff --git a/apex_dma/memflow_lib/memflow/src/types/pointer64.rs b/apex_dma/memflow_lib/memflow/src/types/pointer64.rs deleted file mode 100644 index 5e3be7c..0000000 --- 
a/apex_dma/memflow_lib/memflow/src/types/pointer64.rs +++ /dev/null @@ -1,269 +0,0 @@ -/*! -64-bit Pointer abstraction. -*/ - -use crate::error::PartialResult; -use crate::mem::VirtualMemory; -use crate::types::{Address, ByteSwap}; - -use std::marker::PhantomData; -use std::mem::size_of; -use std::{cmp, fmt, hash, ops}; - -use dataview::Pod; - -/// This type can be used in structs that are being read from the target memory. -/// It holds a phantom type that can be used to describe the proper type of the pointer -/// and to read it in a more convenient way. -/// -/// This module is a direct adaption of [CasualX's great IntPtr crate](https://github.com/CasualX/intptr). -/// -/// Generally the generic Type should implement the Pod trait to be read into easily. -/// See [here](https://docs.rs/dataview/0.1.1/dataview/) for more information on the Pod trait. -/// -/// # Examples -/// -/// ``` -/// use memflow::types::Pointer64; -/// use memflow::mem::VirtualMemory; -/// use dataview::Pod; -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Foo { -/// pub some_value: i64, -/// } -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Bar { -/// pub foo_ptr: Pointer64, -/// } -/// -/// fn read_foo_bar(virt_mem: &mut T) { -/// let bar: Bar = virt_mem.virt_read(0x1234.into()).unwrap(); -/// let foo = bar.foo_ptr.deref(virt_mem).unwrap(); -/// println!("value: {}", foo.some_value); -/// } -/// -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # read_foo_bar(&mut DummyMemory::new_virt(size::mb(4), size::mb(2), &[]).0); -/// ``` -/// -/// ``` -/// use memflow::types::Pointer64; -/// use memflow::mem::VirtualMemory; -/// use dataview::Pod; -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Foo { -/// pub some_value: i64, -/// } -/// -/// #[repr(C)] -/// #[derive(Clone, Debug, Pod)] -/// struct Bar { -/// pub foo_ptr: Pointer64, -/// } -/// -/// fn read_foo_bar(virt_mem: &mut T) { -/// let bar: Bar = 
virt_mem.virt_read(0x1234.into()).unwrap(); -/// let foo = virt_mem.virt_read_ptr64(bar.foo_ptr).unwrap(); -/// println!("value: {}", foo.some_value); -/// } -/// -/// # use memflow::mem::dummy::DummyMemory; -/// # use memflow::types::size; -/// # read_foo_bar(&mut DummyMemory::new_virt(size::mb(4), size::mb(2), &[]).0); -/// ``` -#[repr(transparent)] -#[cfg_attr(feature = "serde", derive(::serde::Serialize))] -pub struct Pointer64 { - pub address: u64, - phantom_data: PhantomData T>, -} - -impl Pointer64 { - const PHANTOM_DATA: PhantomData T> = PhantomData; - - /// A pointer with a value of zero. - pub const NULL: Pointer64 = Pointer64 { - address: 0, - phantom_data: PhantomData, - }; - - /// Returns a pointer with a value of zero. - pub fn null() -> Self { - Pointer64::NULL - } - - /// Checks wether the containing value of this pointer is zero. - pub fn is_null(self) -> bool { - self.address == 0 - } - - /// Returns the underlying raw u64 value of this pointer. - pub const fn into_raw(self) -> u64 { - self.address - } -} - -/// This function will deref the pointer directly into a Pod type. -impl Pointer64 { - pub fn deref_into(self, mem: &mut U, out: &mut T) -> PartialResult<()> { - mem.virt_read_ptr64_into(self, out) - } -} - -/// This function will return the Object this pointer is pointing towards. 
-impl Pointer64 { - pub fn deref(self, mem: &mut U) -> PartialResult { - mem.virt_read_ptr64(self) - } -} - -impl Pointer64<[T]> { - pub const fn decay(self) -> Pointer64 { - Pointer64 { - address: self.address, - phantom_data: Pointer64::::PHANTOM_DATA, - } - } - - pub const fn at(self, i: usize) -> Pointer64 { - let address = self.address + (i * size_of::()) as u64; - Pointer64 { - address, - phantom_data: Pointer64::::PHANTOM_DATA, - } - } -} - -impl Copy for Pointer64 {} -impl Clone for Pointer64 { - #[inline(always)] - fn clone(&self) -> Pointer64 { - *self - } -} -impl Default for Pointer64 { - #[inline(always)] - fn default() -> Pointer64 { - Pointer64::NULL - } -} -impl Eq for Pointer64 {} -impl PartialEq for Pointer64 { - #[inline(always)] - fn eq(&self, rhs: &Pointer64) -> bool { - self.address == rhs.address - } -} -impl PartialOrd for Pointer64 { - #[inline(always)] - fn partial_cmp(&self, rhs: &Pointer64) -> Option { - self.address.partial_cmp(&rhs.address) - } -} -impl Ord for Pointer64 { - #[inline(always)] - fn cmp(&self, rhs: &Pointer64) -> cmp::Ordering { - self.address.cmp(&rhs.address) - } -} -impl hash::Hash for Pointer64 { - #[inline(always)] - fn hash(&self, state: &mut H) { - self.address.hash(state) - } -} -impl AsRef for Pointer64 { - #[inline(always)] - fn as_ref(&self) -> &u64 { - &self.address - } -} -impl AsMut for Pointer64 { - #[inline(always)] - fn as_mut(&mut self) -> &mut u64 { - &mut self.address - } -} - -impl From for Pointer64 { - #[inline(always)] - fn from(address: u64) -> Pointer64 { - Pointer64 { - address, - phantom_data: PhantomData, - } - } -} -impl From> for Address { - #[inline(always)] - fn from(ptr: Pointer64) -> Address { - ptr.address.into() - } -} -impl From> for u64 { - #[inline(always)] - fn from(ptr: Pointer64) -> u64 { - ptr.address - } -} - -impl ops::Add for Pointer64 { - type Output = Pointer64; - #[inline(always)] - fn add(self, other: usize) -> Pointer64 { - let address = self.address + (other * 
size_of::()) as u64; - Pointer64 { - address, - phantom_data: self.phantom_data, - } - } -} -impl ops::Sub for Pointer64 { - type Output = Pointer64; - #[inline(always)] - fn sub(self, other: usize) -> Pointer64 { - let address = self.address - (other * size_of::()) as u64; - Pointer64 { - address, - phantom_data: self.phantom_data, - } - } -} - -impl fmt::Debug for Pointer64 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} -impl fmt::UpperHex for Pointer64 { - #[inline(always)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:X}", self.address) - } -} -impl fmt::LowerHex for Pointer64 { - #[inline(always)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} -impl fmt::Display for Pointer64 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.address) - } -} - -unsafe impl Pod for Pointer64 {} - -impl ByteSwap for Pointer64 { - fn byte_swap(&mut self) { - self.address.byte_swap(); - } -} diff --git a/apex_dma/memflow_lib/memflow/src/types/size.rs b/apex_dma/memflow_lib/memflow/src/types/size.rs deleted file mode 100644 index f50b473..0000000 --- a/apex_dma/memflow_lib/memflow/src/types/size.rs +++ /dev/null @@ -1,49 +0,0 @@ -/*! -This module contains helper functions for creating various byte sizes. -All function are const and will be [optimized](https://rust.godbolt.org/z/T6LiwJ) by rustc. -*/ - -/// Returns a usize representing the length in bytes from the given number of kilobytes. -pub const fn kb(kb: usize) -> usize { - kb * 1024 -} - -/// Returns a usize representing the length in bytes from the given number of kilobits. -pub const fn kib(kib: usize) -> usize { - kb(kib) / 8 -} - -/// Returns a usize representing the length in bytes from the given number of megabytes. 
-pub const fn mb(mb: usize) -> usize { - kb(mb) * 1024 -} - -/// Returns a usize representing the length in bytes from the given number of megabits. -pub const fn mib(mib: usize) -> usize { - mb(mib) / 8 -} - -/// Returns a usize representing the length in bytes from the given number of gigabytes. -pub const fn gb(gb: usize) -> usize { - mb(gb) * 1024 -} - -/// Returns a usize representing the length in bytes from the given number of gigabits. -pub const fn gib(gib: usize) -> usize { - gb(gib) / 8 -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_from() { - assert_eq!(kb(20), 20480usize); - assert_eq!(kib(123), 15744usize); - assert_eq!(mb(20), 20_971_520_usize); - assert_eq!(mib(52), 6_815_744_usize); - assert_eq!(gb(20), 21_474_836_480_usize); - assert_eq!(gib(52), 6_979_321_856_usize); - } -} diff --git a/apex_dma/memflow_lib/nostd-test/Cargo.toml b/apex_dma/memflow_lib/nostd-test/Cargo.toml index 5e93c46..7b369db 100644 --- a/apex_dma/memflow_lib/nostd-test/Cargo.toml +++ b/apex_dma/memflow_lib/nostd-test/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "nostd-test" -version = "0.1.5" -authors = ["Aurimas Blažulionis <0x60@pm.me>"] +version = "0.2.0" +authors = ["Aurimas Blažulionis <0x60@pm.me>", "ko1N "] edition = "2018" homepage = "https://memflow.github.io" repository = "https://github.com/memflow/memflow" -license-file = "../LICENSE" +license = "MIT" publish = false [profile.release] @@ -16,8 +16,7 @@ panic = "abort" [dependencies] rlibc = "1.0.0" -uefi = "0.6.0" -uefi-services = "0.3.0" -log = "0.4" -memflow = { path = "../memflow", default-features = false } -memflow-win32 = { path = "../memflow-win32/", default-features = false, features = ["embed_offsets"] } +uefi = "0.26.0" +uefi-services = "0.23.0" +log = "^0.4.14" +memflow = { version = "0.2", path = "../memflow", default-features = false } diff --git a/apex_dma/memflow_lib/nostd-test/src/main.rs b/apex_dma/memflow_lib/nostd-test/src/main.rs index cca89e0..e248821 100644 --- 
a/apex_dma/memflow_lib/nostd-test/src/main.rs +++ b/apex_dma/memflow_lib/nostd-test/src/main.rs @@ -4,28 +4,23 @@ use core::*; use uefi::prelude::*; +#[allow(unused)] #[macro_use] extern crate alloc; extern crate rlibc; -use crate::alloc::vec::Vec; - use log::*; -use uefi::{ - data_types::{CStr16, Char16}, - proto::Protocol, - unsafe_guid, Handle, Status, -}; +use uefi::{Handle, Status}; #[entry] -fn efi_main(handle: Handle, st: SystemTable) -> Status { - uefi_services::init(&st).expect_success("Failed to initialize utilities"); +fn efi_main(_handle: Handle, mut st: SystemTable) -> Status { + uefi_services::init(&mut st).expect_err("Failed to initialize utilities"); info!("memflow EFI test"); - let bt = st.boot_services(); + let _bt = st.boot_services(); Status::SUCCESS } diff --git a/apex_dma/memory.cpp b/apex_dma/memory.cpp index 3ab7cc7..dc509ab 100644 --- a/apex_dma/memory.cpp +++ b/apex_dma/memory.cpp @@ -1,7 +1,7 @@ #include "memory.h" -//Credits: learn_more, stevemk14ebr -size_t findPattern(const PBYTE rangeStart, size_t len, const char* pattern) +// Credits: learn_more, stevemk14ebr +size_t findPattern(const PBYTE rangeStart, size_t len, const char *pattern) { size_t l = strlen(pattern); PBYTE patt_base = static_cast(malloc(l >> 1)); @@ -64,7 +64,7 @@ void Memory::check_proc() if (status == process_status::FOUND_READY) { short c; - Read(proc.baseaddr, c); + Read(proc.baseaddr, c); if (c != 0x5A4D) { @@ -74,69 +74,85 @@ void Memory::check_proc() } } -void Memory::open_proc(const char* name) +bool kernel_init(Inventory *inv, const char *connector_name) { - if(!conn) - { - ConnectorInventory *inv = inventory_scan(); - conn = inventory_create_connector(inv, "qemu_procfs", ""); - inventory_free(inv); - } - - if (conn) - { - if(!kernel) - { - kernel = kernel_build(conn); - } - - if(kernel) - { - Kernel *tmp_ker = kernel_clone(kernel); - proc.hProcess = kernel_into_process(tmp_ker, name); - } - - if (proc.hProcess) - { - Win32ModuleInfo *module = 
process_module_info(proc.hProcess, name); - - if (module) - { - OsProcessModuleInfoObj *obj = module_info_trait(module); - proc.baseaddr = os_process_module_base(obj); - os_process_module_free(obj); - mem = process_virt_mem(proc.hProcess); - status = process_status::FOUND_READY; - } - else - { - status = process_status::FOUND_NO_ACCESS; - close_proc(); - } - } - else - { - status = process_status::NOT_FOUND; - } - } - else - { - printf("Can't create connector\n"); - exit(0); - } + if (inventory_create_connector(inv, connector_name, "", conn.get())) + { + printf("Can't create %s connector\n", connector_name); + return false; + } + else + { + printf("%s connector created\n", connector_name); + } + + kernel = std::make_unique>(); + if (inventory_create_os(inv, "win32", "", conn.get(), kernel.get())) + { + printf("Unable to initialize kernel using %s connector\n", connector_name); + connector_drop(conn.get()); + return false; + } + + return true; } -void Memory::close_proc() +void Memory::open_proc(const char *name) { - if (proc.hProcess) + if (!conn) + { + conn = std::make_unique>(); + Inventory *inv = inventory_scan(); + + printf("Init with qemu connector...\n"); + if (!kernel_init(inv, "qemu")) + { + printf("Init with kvm connector...\n"); + if (!kernel_init(inv, "kvm")) + { + printf("Quitting\n"); + inventory_free(inv); + exit(1); + } + } + + printf("Kernel initialized: %p\n", kernel.get()->container.instance.instance); + } + + if (kernel.get()->process_by_name(name, &proc.hProcess)) + { + status = process_status::NOT_FOUND; + return; + } + + ModuleInfo module_info; + for (size_t dtb = 0; dtb <= SIZE_MAX; dtb += 0x1000) { - process_free(proc.hProcess); - virt_free(mem); + if (!proc.hProcess.module_by_name(name, &module_info)) + break; + + if (dtb == SIZE_MAX) + { + printf("Access error for process %s\n", name); + status = process_status::FOUND_NO_ACCESS; + return; + } + else + { + if (dtb == 0) + printf("Can't find base module info for process %s. 
Trying with a new dtb...\n", name); + + proc.hProcess.set_dtb(dtb, Address_INVALID); + } } - proc.hProcess = 0; + proc.baseaddr = module_info.base; + status = process_status::FOUND_READY; +} + +void Memory::close_proc() +{ proc.baseaddr = 0; - mem = 0; } uint64_t Memory::ScanPointer(uint64_t ptr_address, const uint32_t offsets[], int level) diff --git a/apex_dma/memory.h b/apex_dma/memory.h index 4096bef..df99b90 100644 --- a/apex_dma/memory.h +++ b/apex_dma/memory.h @@ -1,20 +1,21 @@ -#include "memflow_win32.h" +#include "memflow.hpp" #include #include #include +#include -#define INRANGE(x,a,b) (x >= a && x <= b) -#define getBits( x ) (INRANGE(x,'0','9') ? (x - '0') : ((x&(~0x20)) - 'A' + 0xa)) -#define getByte( x ) (getBits(x[0]) << 4 | getBits(x[1])) +#define INRANGE(x, a, b) (x >= a && x <= b) +#define getBits(x) (INRANGE(x, '0', '9') ? (x - '0') : ((x & (~0x20)) - 'A' + 0xa)) +#define getByte(x) (getBits(x[0]) << 4 | getBits(x[1])) -typedef uint8_t* PBYTE; +typedef uint8_t *PBYTE; typedef uint8_t BYTE; typedef unsigned long DWORD; typedef unsigned short WORD; typedef WORD *PWORD; -static CloneablePhysicalMemoryObj *conn = 0; -static Kernel *kernel = 0; +static std::unique_ptr> conn = nullptr; +static std::unique_ptr> kernel = nullptr; inline bool isMatch(const PBYTE addr, const PBYTE pat, const PBYTE msk) { @@ -29,13 +30,13 @@ inline bool isMatch(const PBYTE addr, const PBYTE pat, const PBYTE msk) return false; } -size_t findPattern(const PBYTE rangeStart, size_t len, const char* pattern); +size_t findPattern(const PBYTE rangeStart, size_t len, const char *pattern); typedef struct Process { - Win32Process* hProcess = 0; + ProcessInstance<> hProcess; uint64_t baseaddr = 0; -}Process; +} Process; enum class process_status : BYTE { @@ -48,11 +49,15 @@ class Memory { private: Process proc; - VirtualMemoryObj* mem; process_status status = process_status::NOT_FOUND; std::mutex m; + public: - ~Memory() { if (mem) virt_free(mem); if (proc.hProcess) 
process_free(proc.hProcess); } + ~Memory() + { + if (kernel) + os_drop(kernel.get()); + } uint64_t get_proc_baseaddr(); @@ -60,49 +65,49 @@ public: void check_proc(); - void open_proc(const char* name); + void open_proc(const char *name); void close_proc(); - template - bool Read(uint64_t address, T& out); + template + bool Read(uint64_t address, T &out); - template + template bool ReadArray(uint64_t address, T out[], size_t len); - template - bool Write(uint64_t address, const T& value); + template + bool Write(uint64_t address, const T &value); - template + template bool WriteArray(uint64_t address, const T value[], size_t len); uint64_t ScanPointer(uint64_t ptr_address, const uint32_t offsets[], int level); }; -template -inline bool Memory::Read(uint64_t address, T& out) +template +inline bool Memory::Read(uint64_t address, T &out) { std::lock_guard l(m); - return mem && virt_read_raw_into(mem, address, (uint8_t*)&out, sizeof(T)) == 0; + return proc.baseaddr && proc.hProcess.read_raw_into(address, CSliceMut((char *)&out, sizeof(T))) == 0; } -template +template inline bool Memory::ReadArray(uint64_t address, T out[], size_t len) { std::lock_guard l(m); - return mem && virt_read_raw_into(mem, address, (uint8_t*)out, sizeof(T) * len) == 0; + return proc.baseaddr && proc.hProcess.read_raw_into(address, CSliceMut((char *)out, sizeof(T) * len)) == 0; } -template -inline bool Memory::Write(uint64_t address, const T& value) +template +inline bool Memory::Write(uint64_t address, const T &value) { std::lock_guard l(m); - return mem && virt_write_raw(mem, address, (uint8_t*)&value, sizeof(T)) == 0; + return proc.baseaddr && proc.hProcess.write_raw(address, CSliceRef((char *)&value, sizeof(T))) == 0; } -template +template inline bool Memory::WriteArray(uint64_t address, const T value[], size_t len) { std::lock_guard l(m); - return mem && virt_write_raw(mem, address, (uint8_t*)value, sizeof(T) * len) == 0; + return proc.baseaddr && proc.hProcess.write_raw(address, 
CSliceRef((char *)value, sizeof(T) * len)) == 0; } \ No newline at end of file