diff --git a/aya/src/sys/bpf.rs b/aya/src/sys/bpf.rs
index 7cb0b545..a4512bd2 100644
--- a/aya/src/sys/bpf.rs
+++ b/aya/src/sys/bpf.rs
@@ -681,7 +681,10 @@ pub(crate) fn bpf_load_btf(
 }
 
 // SAFETY: only use for bpf_cmd that return a new file descriptor on success.
-unsafe fn fd_sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> io::Result<crate::MockableFd> {
+pub(super) unsafe fn fd_sys_bpf(
+    cmd: bpf_cmd,
+    attr: &mut bpf_attr,
+) -> io::Result<crate::MockableFd> {
     let fd = sys_bpf(cmd, attr)?;
     let fd = fd.try_into().map_err(|std::num::TryFromIntError { .. }| {
         io::Error::new(
diff --git a/aya/src/sys/feature_probe.rs b/aya/src/sys/feature_probe.rs
index 9b7ab2fb..cf2041c6 100644
--- a/aya/src/sys/feature_probe.rs
+++ b/aya/src/sys/feature_probe.rs
@@ -1,17 +1,21 @@
 //! Probes and identifies available eBPF features supported by the host kernel.
 
-use std::io::ErrorKind;
+use std::{io::ErrorKind, mem, os::fd::AsRawFd as _};
 
 use aya_obj::{
     btf::{Btf, BtfError, BtfKind},
-    generated::{BPF_F_SLEEPABLE, bpf_attach_type},
+    generated::{
+        BPF_F_MMAPABLE, BPF_F_NO_PREALLOC, BPF_F_SLEEPABLE, bpf_attach_type, bpf_attr, bpf_cmd,
+        bpf_map_type,
+    },
 };
-use libc::{E2BIG, EINVAL};
+use libc::{E2BIG, EBADF, EINVAL};
 
-use super::{SyscallError, bpf_prog_load, with_trivial_prog};
+use super::{SyscallError, bpf_prog_load, fd_sys_bpf, with_trivial_prog};
 use crate::{
+    maps::MapType,
     programs::{ProgramError, ProgramType},
-    util::KernelVersion,
+    util::{KernelVersion, page_size},
 };
 
 /// Whether the host kernel supports the [`ProgramType`].
@@ -78,6 +82,150 @@ pub fn is_program_supported(program_type: ProgramType) -> Result<bool, ProgramEr
     }
 }
 
+/// Whether the host kernel supports the [`MapType`].
+///
+/// # Examples
+///
+/// ```no_run
+/// # use aya::{
+/// #     maps::MapType,
+/// #     sys::feature_probe::is_map_supported,
+/// # };
+/// #
+/// match is_map_supported(MapType::HashOfMaps) {
+///     Ok(true) => println!("hash_of_maps supported :)"),
+///     Ok(false) => println!("hash_of_maps not supported :("),
+///     Err(err) => println!("Uh oh! Unexpected error: {:?}", err),
+/// }
+/// ```
+///
+/// # Errors
+///
+/// Returns [`SyscallError`] if kernel probing fails with an unexpected error.
+///
+/// Note that certain errors are expected and handled internally; only
+/// unanticipated failures during probing will result in this error.
+pub fn is_map_supported(map_type: MapType) -> Result<bool, SyscallError> {
+    if map_type == MapType::Unspecified {
+        return Ok(false);
+    }
+
+    // SAFETY: all-zero byte-pattern valid for `bpf_attr`
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    // SAFETY: union access
+    let u = unsafe { &mut attr.__bindgen_anon_1 };
+
+    // To pass `map_alloc_check`/`map_alloc`
+    let key_size = match map_type {
+        MapType::LpmTrie | MapType::CgroupStorage | MapType::PerCpuCgroupStorage => 16,
+        MapType::Queue
+        | MapType::Stack
+        | MapType::RingBuf
+        | MapType::BloomFilter
+        | MapType::UserRingBuf
+        | MapType::Arena => 0,
+        _ => 4,
+    };
+    let value_size = match map_type {
+        MapType::StackTrace | MapType::LpmTrie => 8,
+        MapType::InodeStorage => {
+            // Intentionally trigger `E2BIG` from
+            // `bpf_local_storage_map_alloc_check()`.
+            u32::MAX
+        }
+        MapType::RingBuf | MapType::UserRingBuf | MapType::Arena => 0,
+        _ => 4,
+    };
+    let max_entries = match map_type {
+        MapType::CgroupStorage
+        | MapType::PerCpuCgroupStorage
+        | MapType::SkStorage
+        | MapType::InodeStorage
+        | MapType::TaskStorage
+        | MapType::CgrpStorage => 0,
+        MapType::RingBuf | MapType::UserRingBuf => page_size() as u32,
+        _ => 1,
+    };
+
+    // Ensure that fd doesn't get dropped due to scoping.
+    let inner_map_fd;
+    match map_type {
+        MapType::LpmTrie => u.map_flags = BPF_F_NO_PREALLOC,
+        MapType::SkStorage
+        | MapType::InodeStorage
+        | MapType::TaskStorage
+        | MapType::CgrpStorage => {
+            u.map_flags = BPF_F_NO_PREALLOC;
+            // Intentionally trigger `EBADF` from `btf_get_by_fd()`.
+            u.btf_fd = u32::MAX;
+            u.btf_key_type_id = 1;
+            u.btf_value_type_id = 1;
+        }
+        MapType::ArrayOfMaps | MapType::HashOfMaps => {
+            // SAFETY: all-zero byte-pattern valid for `bpf_attr`
+            let mut attr_map = unsafe { mem::zeroed::<bpf_attr>() };
+            // SAFETY: union access
+            let u_map = unsafe { &mut attr_map.__bindgen_anon_1 };
+            u_map.map_type = bpf_map_type::BPF_MAP_TYPE_HASH as u32;
+            u_map.key_size = 1;
+            u_map.value_size = 1;
+            u_map.max_entries = 1;
+            // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+            inner_map_fd = unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr_map) }.map_err(
+                |io_error| SyscallError {
+                    call: "bpf_map_create",
+                    io_error,
+                },
+            )?;
+
+            u.inner_map_fd = inner_map_fd.as_raw_fd() as u32;
+        }
+        MapType::StructOps => u.btf_vmlinux_value_type_id = 1,
+        MapType::Arena => u.map_flags = BPF_F_MMAPABLE,
+        _ => {}
+    }
+
+    u.map_type = map_type as u32;
+    u.key_size = key_size;
+    u.value_size = value_size;
+    u.max_entries = max_entries;
+
+    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+    let io_error = match unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) } {
+        Ok(_) => return Ok(true),
+        Err(io_error) => io_error,
+    };
+    match io_error.raw_os_error() {
+        Some(EINVAL) => Ok(false),
+        Some(E2BIG) if map_type == MapType::InodeStorage => Ok(true),
+        Some(E2BIG)
+            if matches!(
+                map_type,
+                MapType::SkStorage
+                    | MapType::StructOps
+                    | MapType::TaskStorage
+                    | MapType::CgrpStorage
+            ) =>
+        {
+            Ok(false)
+        }
+        Some(EBADF)
+            if matches!(
+                map_type,
+                MapType::SkStorage | MapType::TaskStorage | MapType::CgrpStorage
+            ) =>
+        {
+            Ok(true)
+        }
+        // `ENOTSUPP` from `bpf_struct_ops_map_alloc()` for struct_ops.
+        Some(524) if map_type == MapType::StructOps => Ok(true),
+        _ => Err(SyscallError {
+            call: "bpf_map_create",
+            io_error,
+        }),
+    }
+}
+
 /// Create a minimal program with the specified type.
 /// Types not created for `Extension` and `StructOps`.
 fn create_minimal_program(
diff --git a/test/integration-test/src/tests/feature_probe.rs b/test/integration-test/src/tests/feature_probe.rs
index 47728c2f..7c7603e0 100644
--- a/test/integration-test/src/tests/feature_probe.rs
+++ b/test/integration-test/src/tests/feature_probe.rs
@@ -3,7 +3,7 @@
 use std::path::Path;
 
 use assert_matches::assert_matches;
-use aya::{programs::ProgramType, sys::feature_probe::*, util::KernelVersion};
+use aya::{maps::MapType, programs::ProgramType, sys::feature_probe::*, util::KernelVersion};
 
 // TODO: Enable certain CONFIG_* options when compiling the image for VM tests.
 #[test]
@@ -193,3 +193,200 @@ fn probe_supported_programs() {
         assert_matches!(netfilter, Ok(false));
     }
 }
+
+#[test]
+fn probe_supported_maps() {
+    let current = KernelVersion::current().unwrap();
+
+    let hash = is_map_supported(MapType::Hash);
+    let array = is_map_supported(MapType::Array);
+    if current >= KernelVersion::new(3, 19, 0) {
+        assert_matches!(hash, Ok(true));
+        assert_matches!(array, Ok(true));
+    } else {
+        assert_matches!(hash, Ok(false));
+        assert_matches!(array, Ok(false));
+    }
+
+    let prog_array = is_map_supported(MapType::ProgramArray);
+    if current >= KernelVersion::new(4, 2, 0) {
+        assert_matches!(prog_array, Ok(true));
+    } else {
+        assert_matches!(prog_array, Ok(false));
+    }
+
+    let perf_event_array = is_map_supported(MapType::PerfEventArray);
+    if current >= KernelVersion::new(4, 3, 0) {
+        assert_matches!(perf_event_array, Ok(true));
+    } else {
+        assert_matches!(perf_event_array, Ok(false));
+    }
+
+    let per_cpu_hash = is_map_supported(MapType::PerCpuHash);
+    let per_cpu_array = is_map_supported(MapType::PerCpuArray);
+    let stack_trace = is_map_supported(MapType::StackTrace);
+    if current >= KernelVersion::new(4, 6, 0) {
+        assert_matches!(per_cpu_hash, Ok(true));
+        assert_matches!(per_cpu_array, Ok(true));
+        assert_matches!(stack_trace, Ok(true));
+    } else {
+        assert_matches!(per_cpu_hash, Ok(false));
+        assert_matches!(per_cpu_array, Ok(false));
+        assert_matches!(stack_trace, Ok(false));
+    }
+
+    let cgroup_array = is_map_supported(MapType::CgroupArray);
+    if current >= KernelVersion::new(4, 8, 0) {
+        assert_matches!(cgroup_array, Ok(true));
+    } else {
+        assert_matches!(cgroup_array, Ok(false));
+    }
+
+    let lru_hash = is_map_supported(MapType::LruHash);
+    let lru_per_cpu_hash = is_map_supported(MapType::LruPerCpuHash);
+    if current >= KernelVersion::new(4, 10, 0) {
+        assert_matches!(lru_hash, Ok(true));
+        assert_matches!(lru_per_cpu_hash, Ok(true));
+    } else {
+        assert_matches!(lru_hash, Ok(false));
+        assert_matches!(lru_per_cpu_hash, Ok(false));
+    }
+
+    let lpm_trie = is_map_supported(MapType::LpmTrie);
+    if current >= KernelVersion::new(4, 11, 0) {
+        assert_matches!(lpm_trie, Ok(true));
+    } else {
+        assert_matches!(lpm_trie, Ok(false));
+    }
+
+    let array_of_maps = is_map_supported(MapType::ArrayOfMaps);
+    let hash_of_maps = is_map_supported(MapType::HashOfMaps);
+    if current >= KernelVersion::new(4, 12, 0) {
+        assert_matches!(array_of_maps, Ok(true));
+        assert_matches!(hash_of_maps, Ok(true));
+    } else {
+        assert_matches!(array_of_maps, Ok(false));
+        assert_matches!(hash_of_maps, Ok(false));
+    }
+
+    let dev_map = is_map_supported(MapType::DevMap);
+    let sock_map = is_map_supported(MapType::SockMap);
+    if current >= KernelVersion::new(4, 14, 0) {
+        assert_matches!(dev_map, Ok(true));
+        assert_matches!(sock_map, Ok(true));
+    } else {
+        assert_matches!(dev_map, Ok(false));
+        assert_matches!(sock_map, Ok(false));
+    }
+
+    let cpu_map = is_map_supported(MapType::CpuMap);
+    if current >= KernelVersion::new(4, 15, 0) {
+        assert_matches!(cpu_map, Ok(true));
+    } else {
+        assert_matches!(cpu_map, Ok(false));
+    }
+
+    let xsk_map = is_map_supported(MapType::XskMap);
+    let sock_hash = is_map_supported(MapType::SockHash);
+    if current >= KernelVersion::new(4, 18, 0) {
+        assert_matches!(xsk_map, Ok(true));
+        assert_matches!(sock_hash, Ok(true));
+    } else {
+        assert_matches!(xsk_map, Ok(false));
+        assert_matches!(sock_hash, Ok(false));
+    }
+
+    let cgroup_storage = is_map_supported(MapType::CgroupStorage);
+    let reuseport_sock_array = is_map_supported(MapType::ReuseportSockArray);
+    if current >= KernelVersion::new(4, 19, 0) {
+        assert_matches!(cgroup_storage, Ok(true));
+        assert_matches!(reuseport_sock_array, Ok(true));
+    } else {
+        assert_matches!(cgroup_storage, Ok(false));
+        assert_matches!(reuseport_sock_array, Ok(false));
+    }
+
+    let per_cpu_cgroup_storage = is_map_supported(MapType::PerCpuCgroupStorage);
+    let queue = is_map_supported(MapType::Queue);
+    let stack = is_map_supported(MapType::Stack);
+    if current >= KernelVersion::new(4, 20, 0) {
+        assert_matches!(per_cpu_cgroup_storage, Ok(true));
+        assert_matches!(queue, Ok(true));
+        assert_matches!(stack, Ok(true));
+    } else {
+        assert_matches!(per_cpu_cgroup_storage, Ok(false));
+        assert_matches!(queue, Ok(false));
+        assert_matches!(stack, Ok(false));
+    }
+
+    let sk_storage = is_map_supported(MapType::SkStorage);
+    if current >= KernelVersion::new(5, 2, 0) {
+        assert_matches!(sk_storage, Ok(true));
+    } else {
+        assert_matches!(sk_storage, Ok(false));
+    }
+
+    let devmap_hash = is_map_supported(MapType::DevMapHash);
+    if current >= KernelVersion::new(5, 4, 0) {
+        assert_matches!(devmap_hash, Ok(true));
+    } else {
+        assert_matches!(devmap_hash, Ok(false));
+    }
+
+    let struct_ops = is_map_supported(MapType::StructOps);
+    if current >= KernelVersion::new(5, 6, 0) {
+        assert_matches!(struct_ops, Ok(true));
+    } else {
+        assert_matches!(struct_ops, Ok(false));
+    }
+
+    let ring_buf = is_map_supported(MapType::RingBuf);
+    if current >= KernelVersion::new(5, 8, 0) {
+        assert_matches!(ring_buf, Ok(true));
+    } else {
+        assert_matches!(ring_buf, Ok(false));
+    }
+
+    // Requires `CONFIG_BPF_LSM=y`
+    // let inode_storage = is_map_supported(MapType::InodeStorage);
+    // if current >= KernelVersion::new(5, 10, 0) {
+    //     assert_matches!(inode_storage, Ok(true));
+    // } else {
+    //     assert_matches!(inode_storage, Ok(false));
+    // }
+
+    let task_storage = is_map_supported(MapType::TaskStorage);
+    if current >= KernelVersion::new(5, 11, 0) {
+        assert_matches!(task_storage, Ok(true));
+    } else {
+        assert_matches!(task_storage, Ok(false));
+    }
+
+    let bloom_filter = is_map_supported(MapType::BloomFilter);
+    if current >= KernelVersion::new(5, 16, 0) {
+        assert_matches!(bloom_filter, Ok(true));
+    } else {
+        assert_matches!(bloom_filter, Ok(false));
+    }
+
+    let user_ring_buf = is_map_supported(MapType::UserRingBuf);
+    if current >= KernelVersion::new(6, 1, 0) {
+        assert_matches!(user_ring_buf, Ok(true));
+    } else {
+        assert_matches!(user_ring_buf, Ok(false));
+    }
+
+    let cgrp_storage = is_map_supported(MapType::CgrpStorage);
+    if current >= KernelVersion::new(6, 2, 0) {
+        assert_matches!(cgrp_storage, Ok(true));
+    } else {
+        assert_matches!(cgrp_storage, Ok(false));
+    }
+
+    let arena = is_map_supported(MapType::Arena);
+    if current >= KernelVersion::new(6, 9, 0) {
+        assert_matches!(arena, Ok(true));
+    } else {
+        assert_matches!(arena, Ok(false));
+    }
+}
diff --git a/xtask/public-api/aya.txt b/xtask/public-api/aya.txt
index 92d8ff60..8e2c2238 100644
--- a/xtask/public-api/aya.txt
+++ b/xtask/public-api/aya.txt
@@ -9998,6 +9998,7 @@ pub fn aya::programs::tc::SchedClassifier::fd(&self) -> core::result::Result<std
 pub fn aya::programs::loaded_programs() -> impl core::iter::traits::iterator::Iterator<Item = core::result::Result<aya::programs::ProgramInfo, aya::programs::ProgramError>>
 pub mod aya::sys
 pub mod aya::sys::feature_probe
+pub fn aya::sys::feature_probe::is_map_supported(map_type: aya::maps::MapType) -> core::result::Result<bool, aya::sys::SyscallError>
 pub fn aya::sys::feature_probe::is_program_supported(program_type: aya::programs::ProgramType) -> core::result::Result<bool, aya::programs::ProgramError>
 #[non_exhaustive] pub enum aya::sys::Stats
 pub aya::sys::Stats::RunTime