Merge "Update intake and climber constants"
diff --git a/aos/events/event_loop_runtime.cc b/aos/events/event_loop_runtime.cc
index e3d73c1..790fe9e 100644
--- a/aos/events/event_loop_runtime.cc
+++ b/aos/events/event_loop_runtime.cc
@@ -2,13 +2,15 @@
namespace aos {
-OnRunForRust::OnRunForRust(EventLoopRuntime *runtime) : runtime_(runtime) {
+OnRunForRust::OnRunForRust(const EventLoopRuntime *runtime)
+ : runtime_(runtime) {
++runtime->child_count_;
}
OnRunForRust::~OnRunForRust() { --runtime_->child_count_; }
bool OnRunForRust::is_running() const { return runtime_->is_running(); }
-std::unique_ptr<TimerForRust> TimerForRust::Make(EventLoopRuntime *runtime) {
+std::unique_ptr<TimerForRust> TimerForRust::Make(
+ const EventLoopRuntime *runtime) {
auto handler = std::unique_ptr<TimerForRust>(new TimerForRust());
TimerForRust *inner = handler.get();
handler->timer_ = runtime->event_loop()->AddTimer([inner, runtime] {
diff --git a/aos/events/event_loop_runtime.h b/aos/events/event_loop_runtime.h
index 7cc551f..7c23456 100644
--- a/aos/events/event_loop_runtime.h
+++ b/aos/events/event_loop_runtime.h
@@ -131,18 +131,18 @@
class OnRunForRust {
public:
- OnRunForRust(EventLoopRuntime *runtime);
+ OnRunForRust(const EventLoopRuntime *runtime);
~OnRunForRust();
bool is_running() const;
private:
- EventLoopRuntime *const runtime_;
+ const EventLoopRuntime *const runtime_;
};
class TimerForRust {
public:
- static std::unique_ptr<TimerForRust> Make(EventLoopRuntime *runtime);
+ static std::unique_ptr<TimerForRust> Make(const EventLoopRuntime *runtime);
TimerForRust(const TimerForRust &) = delete;
TimerForRust(TimerForRust &&) = delete;
@@ -185,9 +185,9 @@
<< ": Some child objects were not destroyed first";
}
- EventLoop *event_loop() { return event_loop_; }
+ EventLoop *event_loop() const { return event_loop_; }
- void Spawn(std::unique_ptr<ApplicationFuture> task) {
+ void Spawn(std::unique_ptr<ApplicationFuture> task) const {
CHECK(!task_) << ": May only call Spawn once";
task_ = std::move(task);
DoPoll();
@@ -219,30 +219,40 @@
rust::Str name() const { return StringViewToRustStr(event_loop_->name()); }
- WatcherForRust MakeWatcher(const Channel *channel) {
+ WatcherForRust MakeWatcher(const Channel *channel) const {
event_loop_->MakeRawNoArgWatcher(channel,
[this](const Context &) { DoPoll(); });
return WatcherForRust(event_loop_->MakeRawFetcher(channel));
}
- SenderForRust MakeSender(const Channel *channel) {
+ SenderForRust MakeSender(const Channel *channel) const {
return SenderForRust(event_loop_->MakeRawSender(channel));
}
- FetcherForRust MakeFetcher(const Channel *channel) {
+ FetcherForRust MakeFetcher(const Channel *channel) const {
return FetcherForRust(event_loop_->MakeRawFetcher(channel));
}
- OnRunForRust MakeOnRun() { return OnRunForRust(this); }
+ OnRunForRust MakeOnRun() const { return OnRunForRust(this); }
- std::unique_ptr<TimerForRust> AddTimer() { return TimerForRust::Make(this); }
+ std::unique_ptr<TimerForRust> AddTimer() const {
+ return TimerForRust::Make(this);
+ }
+
+ void SetRuntimeRealtimePriority(int priority) const {
+ event_loop_->SetRuntimeRealtimePriority(priority);
+ }
+
+ void SetRuntimeAffinity(const cpu_set_t &cpuset) const {
+ event_loop_->SetRuntimeAffinity(cpuset);
+ }
private:
friend class OnRunForRust;
friend class TimerForRust;
// Polls the top-level future once. This is what all the callbacks should do.
- void DoPoll() {
+ void DoPoll() const {
if (task_) {
CHECK(task_->Poll()) << ": Rust panic, aborting";
}
@@ -250,9 +260,20 @@
EventLoop *const event_loop_;
- std::unique_ptr<ApplicationFuture> task_;
+ // For Rust's EventLoopRuntime to be semantically equivalent to C++'s event
+ // loop, we need the ability to have shared references (&EventLoopRuntime) on
+ // the Rust side. Without that, the API would be too restrictive to be
+ // usable. In order for the generated code to use &self references on methods,
+ // they need to be marked `const` on the C++ side. We use the `mutable`
+ // keyword to allow mutation through `const` methods.
+ //
+ // SAFETY:
+ // * The event loop runtime must be `!Sync` on the Rust side (the default).
+ // * We can't expose exclusive references (&mut) to either of the mutable
+ // fields on the Rust side from a shared reference (&).
+ mutable std::unique_ptr<ApplicationFuture> task_;
- int child_count_ = 0;
+ mutable int child_count_ = 0;
};
} // namespace aos
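On the Rust side, the practical effect of the `const`-ification above is that child objects can be created and futures spawned from a shared `&EventLoopRuntime`, including from inside the spawned future itself. A minimal sketch of that usage, modeled on the updated `ping.rs` later in this change (the `ping_rust_fbs` import path and the `i32` value type are assumptions, not part of this diff):

```rust
use aos_configuration as config;
use aos_events_event_loop_runtime::{EventLoopRuntime, Watcher};
use aos_events_shm_event_loop::ShmEventLoop;
use futures::never::Never;
use std::path::Path;

// Generated flatbuffers module for the Ping table; import path assumed.
use ping_rust_fbs::aos::examples as ping;

/// `make_watcher` and `on_run` now take `&self`, so a shared borrow of the
/// runtime is enough to build this task.
async fn log_pings(event_loop: &EventLoopRuntime<'_>) -> Never {
    let mut watcher: Watcher<ping::Ping> = event_loop.make_watcher("/test").unwrap();
    // Wait for startup before handling messages.
    event_loop.on_run().await;
    loop {
        let ping = watcher.next().await;
        let _value = ping.message().unwrap().value();
    }
}

fn main() {
    let config = config::read_config_from(Path::new("pingpong_config.json")).unwrap();
    ShmEventLoop::new(&config).run_with(|runtime| {
        // `spawn` also takes `&self`, so the future may keep borrowing the
        // runtime for the rest of the event loop's lifetime.
        runtime.spawn(log_pings(runtime));
    });
}
```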
diff --git a/aos/events/event_loop_runtime.rs b/aos/events/event_loop_runtime.rs
index 35a4225..1f70bf6 100644
--- a/aos/events/event_loop_runtime.rs
+++ b/aos/events/event_loop_runtime.rs
@@ -259,7 +259,7 @@
impl<T: EventLoopHolder> Drop for EventLoopRuntimeHolder<T> {
fn drop(&mut self) {
- let event_loop = self.0.as_mut().event_loop();
+ let event_loop = self.0.event_loop();
// SAFETY: We're not going to touch this field again. The underlying EventLoop will not be
// run again because we're going to drop it next.
unsafe { ManuallyDrop::drop(&mut self.0) };
@@ -399,8 +399,8 @@
///
/// The returned value should only be used for destroying it (_after_ `self` is dropped) or
/// calling other C++ APIs.
- pub fn raw_event_loop(&mut self) -> *mut ffi::aos::EventLoop {
- self.0.as_mut().event_loop()
+ pub fn raw_event_loop(&self) -> *mut ffi::aos::EventLoop {
+ self.0.event_loop()
}
/// Returns a reference to the name of this EventLoop.
@@ -563,8 +563,8 @@
/// }});
/// # }
/// ```
- pub fn spawn(&mut self, task: impl Future<Output = Never> + 'event_loop) {
- self.0.as_mut().Spawn(RustApplicationFuture::new(task));
+ pub fn spawn(&self, task: impl Future<Output = Never> + 'event_loop) {
+ self.0.Spawn(RustApplicationFuture::new(task));
}
pub fn configuration(&self) -> &'event_loop Configuration {
@@ -590,10 +590,10 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_raw_watcher(&mut self, channel: &'event_loop Channel) -> RawWatcher {
+ pub fn make_raw_watcher(&self, channel: &'event_loop Channel) -> RawWatcher {
// SAFETY: `channel` is valid for the necessary lifetime, all other requirements fall under
// the usual autocxx heuristics.
- RawWatcher(unsafe { self.0.as_mut().MakeWatcher(channel) }.within_box())
+ RawWatcher(unsafe { self.0.MakeWatcher(channel) }.within_box())
}
/// Provides type-safe async blocking access to messages on a channel. `T` should be a
@@ -603,7 +603,7 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_watcher<T>(&mut self, channel_name: &str) -> Result<Watcher<T>, ChannelLookupError>
+ pub fn make_watcher<T>(&self, channel_name: &str) -> Result<Watcher<T>, ChannelLookupError>
where
for<'a> T: FollowWith<'a>,
for<'a> <T as FollowWith<'a>>::Inner: Follow<'a>,
@@ -619,10 +619,10 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_raw_sender(&mut self, channel: &'event_loop Channel) -> RawSender {
+ pub fn make_raw_sender(&self, channel: &'event_loop Channel) -> RawSender {
// SAFETY: `channel` is valid for the necessary lifetime, all other requirements fall under
// the usual autocxx heuristics.
- RawSender(unsafe { self.0.as_mut().MakeSender(channel) }.within_box())
+ RawSender(unsafe { self.0.MakeSender(channel) }.within_box())
}
/// Allows sending messages on a channel with a type-safe API.
@@ -630,7 +630,7 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_sender<T>(&mut self, channel_name: &str) -> Result<Sender<T>, ChannelLookupError>
+ pub fn make_sender<T>(&self, channel_name: &str) -> Result<Sender<T>, ChannelLookupError>
where
for<'a> T: FollowWith<'a>,
for<'a> <T as FollowWith<'a>>::Inner: Follow<'a>,
@@ -646,10 +646,10 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_raw_fetcher(&mut self, channel: &'event_loop Channel) -> RawFetcher {
+ pub fn make_raw_fetcher(&self, channel: &'event_loop Channel) -> RawFetcher {
// SAFETY: `channel` is valid for the necessary lifetime, all other requirements fall under
// the usual autocxx heuristics.
- RawFetcher(unsafe { self.0.as_mut().MakeFetcher(channel) }.within_box())
+ RawFetcher(unsafe { self.0.MakeFetcher(channel) }.within_box())
}
/// Provides type-safe access to messages on a channel, without the ability to wait for a new
@@ -659,7 +659,7 @@
/// # Panics
///
/// Dropping `self` before the returned object is dropped will panic.
- pub fn make_fetcher<T>(&mut self, channel_name: &str) -> Result<Fetcher<T>, ChannelLookupError>
+ pub fn make_fetcher<T>(&self, channel_name: &str) -> Result<Fetcher<T>, ChannelLookupError>
where
for<'a> T: FollowWith<'a>,
for<'a> <T as FollowWith<'a>>::Inner: Follow<'a>,
@@ -676,8 +676,8 @@
/// subsequent code will have any realtime scheduling applied. This means it can rely on
/// consistent timing, but it can no longer create any EventLoop child objects or do anything
/// else non-realtime.
- pub fn on_run(&mut self) -> OnRun {
- OnRun(self.0.as_mut().MakeOnRun().within_box())
+ pub fn on_run(&self) -> OnRun {
+ OnRun(self.0.MakeOnRun().within_box())
}
pub fn is_running(&self) -> bool {
@@ -685,16 +685,21 @@
}
/// Returns an unarmed timer.
- pub fn add_timer(&mut self) -> Timer {
- Timer(self.0.as_mut().AddTimer())
+ pub fn add_timer(&self) -> Timer {
+ Timer(self.0.AddTimer())
}
/// Returns a timer that goes off every `duration`-long ticks.
- pub fn add_interval(&mut self, duration: Duration) -> Timer {
+ pub fn add_interval(&self, duration: Duration) -> Timer {
let mut timer = self.add_timer();
timer.setup(self.monotonic_now(), Some(duration));
timer
}
+
+ /// Sets the scheduler priority to run the event loop at.
+ pub fn set_realtime_priority(&self, priority: i32) {
+ self.0.SetRuntimeRealtimePriority(priority.into());
+ }
}
/// An event loop primitive that allows sleeping asynchronously.
@@ -1461,6 +1466,12 @@
}
}
+impl From<MonotonicInstant> for i64 {
+ fn from(value: MonotonicInstant) -> Self {
+ value.0
+ }
+}
+
impl fmt::Debug for MonotonicInstant {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.duration_since_epoch().fmt(f)
@@ -1489,6 +1500,12 @@
}
}
+impl From<RealtimeInstant> for i64 {
+ fn from(value: RealtimeInstant) -> Self {
+ value.0
+ }
+}
+
impl fmt::Debug for RealtimeInstant {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.duration_since_epoch().fmt(f)
diff --git a/aos/events/ping.rs b/aos/events/ping.rs
index b9725b8..3226461 100644
--- a/aos/events/ping.rs
+++ b/aos/events/ping.rs
@@ -2,7 +2,6 @@
use aos_events_event_loop_runtime::{EventLoopRuntime, Sender, Watcher};
use aos_events_shm_event_loop::ShmEventLoop;
use core::cell::Cell;
-use core::future::Future;
use core::time::Duration;
use futures::never::Never;
use std::path::Path;
@@ -15,8 +14,7 @@
let config = config::read_config_from(Path::new("pingpong_config.json")).unwrap();
let ping = PingTask::new();
ShmEventLoop::new(&config).run_with(|runtime| {
- let task = ping.tasks(runtime);
- runtime.spawn(task);
+ runtime.spawn(ping.tasks(runtime));
});
}
@@ -33,55 +31,42 @@
}
/// Returns a future with all the tasks for the ping process
- pub fn tasks(&self, event_loop: &mut EventLoopRuntime) -> impl Future<Output = Never> + '_ {
- let ping = self.ping(event_loop);
- let handle_pong = self.handle_pong(event_loop);
-
- async move {
- futures::join!(ping, handle_pong);
- unreachable!("Let's hope `never_type` gets stabilized soon :)");
- }
+ pub async fn tasks(&self, event_loop: &EventLoopRuntime<'_>) -> Never {
+ futures::join!(self.ping(event_loop), self.handle_pong(event_loop));
+ unreachable!("Let's hope `never_type` gets stabilized soon :)");
}
- fn ping(&self, event_loop: &mut EventLoopRuntime) -> impl Future<Output = Never> + '_ {
+ async fn ping(&self, event_loop: &EventLoopRuntime<'_>) -> Never {
// The sender is used to send messages back to the pong channel.
let mut ping_sender: Sender<ping::Ping> = event_loop.make_sender("/test").unwrap();
- let startup = event_loop.on_run();
-
let mut interval = event_loop.add_interval(Duration::from_secs(1));
- async move {
- // Wait for startup.
- startup.await;
- loop {
- interval.tick().await;
- self.counter.set(self.counter.get() + 1);
- let mut builder = ping_sender.make_builder();
- let mut ping = ping::PingBuilder::new(builder.fbb());
- let iter = self.counter.get();
- ping.add_value(iter);
- let ping = ping.finish();
- builder.send(ping).expect("Can't send ping");
- }
+ event_loop.on_run().await;
+ loop {
+ interval.tick().await;
+ self.counter.set(self.counter.get() + 1);
+ let mut builder = ping_sender.make_builder();
+ let mut ping = ping::PingBuilder::new(builder.fbb());
+ let iter = self.counter.get();
+ ping.add_value(iter);
+ ping.add_send_time(event_loop.monotonic_now().into());
+ let ping = ping.finish();
+ builder.send(ping).expect("Can't send ping");
}
}
- fn handle_pong(&self, event_loop: &mut EventLoopRuntime) -> impl Future<Output = Never> + '_ {
+ async fn handle_pong(&self, event_loop: &EventLoopRuntime<'_>) -> Never {
// The watcher gives us incoming ping messages.
let mut pong_watcher: Watcher<pong::Pong> = event_loop.make_watcher("/test").unwrap();
- let startup = event_loop.on_run();
- async move {
- // Wait for startup.
- startup.await;
- loop {
- let pong = dbg!(pong_watcher.next().await);
- assert_eq!(
- pong.message().unwrap().value(),
- self.counter.get(),
- "Missed a reply"
- );
- }
+ event_loop.on_run().await;
+ loop {
+ let pong = dbg!(pong_watcher.next().await);
+ assert_eq!(
+ pong.message().unwrap().value(),
+ self.counter.get(),
+ "Missed a reply"
+ );
}
}
}
diff --git a/aos/events/pong.rs b/aos/events/pong.rs
index b817ac2..d2859a8 100644
--- a/aos/events/pong.rs
+++ b/aos/events/pong.rs
@@ -1,7 +1,6 @@
use aos_configuration as config;
use aos_events_event_loop_runtime::{EventLoopRuntime, Sender, Watcher};
use aos_events_shm_event_loop::ShmEventLoop;
-use core::future::Future;
use futures::never::Never;
use std::path::Path;
@@ -18,25 +17,22 @@
}
/// Responds to ping messages with an equivalent pong.
-fn pong(event_loop: &mut EventLoopRuntime) -> impl Future<Output = Never> {
+async fn pong(event_loop: &EventLoopRuntime<'_>) -> Never {
// The watcher gives us incoming ping messages.
let mut ping_watcher: Watcher<ping::Ping> = event_loop.make_watcher("/test").unwrap();
// The sender is used to send messages back to the pong channel.
let mut pong_sender: Sender<pong::Pong> = event_loop.make_sender("/test").unwrap();
- // Wait for startup.
- let startup = event_loop.on_run();
- async move {
- startup.await;
- loop {
- let ping = dbg!(ping_watcher.next().await);
+ event_loop.on_run().await;
+ loop {
+ let ping = dbg!(ping_watcher.next().await);
- let mut builder = pong_sender.make_builder();
- let mut pong = pong::PongBuilder::new(builder.fbb());
- pong.add_value(ping.message().unwrap().value());
- let pong = pong.finish();
- builder.send(pong).expect("Can't send pong reponse");
- }
+ let mut builder = pong_sender.make_builder();
+ let mut pong = pong::PongBuilder::new(builder.fbb());
+ pong.add_value(ping.message().unwrap().value());
+ pong.add_initial_send_time(event_loop.monotonic_now().into());
+ let pong = pong.finish();
+ builder.send(pong).expect("Can't send pong response");
}
}
diff --git a/aos/events/shm_event_loop.rs b/aos/events/shm_event_loop.rs
index 880f72b..cebf81b 100644
--- a/aos/events/shm_event_loop.rs
+++ b/aos/events/shm_event_loop.rs
@@ -163,7 +163,9 @@
/// ```
pub fn run_with<'env, F>(mut self, fun: F)
where
- F: for<'event_loop> FnOnce(&mut Scoped<'event_loop, 'env, EventLoopRuntime<'event_loop>>),
+ F: for<'event_loop> FnOnce(
+ &'event_loop mut Scoped<'event_loop, 'env, EventLoopRuntime<'event_loop>>,
+ ),
{
// SAFETY: The runtime and the event loop (i.e. self) both get destroyed at the end of this
// scope: first the runtime followed by the event loop. The runtime gets exclusive access
@@ -259,12 +261,11 @@
let mut event_loop = ShmEventLoop::new(config);
let exit_handle = event_loop.make_exit_handle();
event_loop.run_with(|runtime| {
- let mut watcher: Watcher<ping::Ping> = runtime
- .make_watcher("/test")
- .expect("Can't create `Ping` watcher");
- let on_run = runtime.on_run();
- runtime.spawn(async move {
- on_run.await;
+ runtime.spawn(async {
+ let mut watcher: Watcher<ping::Ping> = runtime
+ .make_watcher("/test")
+ .expect("Can't create `Ping` watcher");
+ runtime.on_run().await;
barrier.wait();
let ping = watcher.next().await;
assert_eq!(ping.message().unwrap().value(), VALUE);
@@ -277,12 +278,11 @@
let mut event_loop = ShmEventLoop::new(config);
let exit_handle = event_loop.make_exit_handle();
event_loop.run_with(|runtime| {
- let mut sender: Sender<ping::Ping> = runtime
- .make_sender("/test")
- .expect("Can't create `Ping` sender");
- let on_run = runtime.on_run();
- runtime.spawn(async move {
- on_run.await;
+ runtime.spawn(async {
+ let mut sender: Sender<ping::Ping> = runtime
+ .make_sender("/test")
+ .expect("Can't create `Ping` sender");
+ runtime.on_run().await;
// Give the waiting thread a chance to start.
barrier.wait();
let mut sender = sender.make_builder();
diff --git a/third_party/autocxx/Cargo.lock b/third_party/autocxx/Cargo.lock
index a7872e3..4587dcb 100644
--- a/third_party/autocxx/Cargo.lock
+++ b/third_party/autocxx/Cargo.lock
@@ -1392,9 +1392,9 @@
[[package]]
name = "rustix"
-version = "0.37.23"
+version = "0.37.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
+checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035"
dependencies = [
"bitflags 1.3.2",
"errno",
@@ -1561,7 +1561,7 @@
"cfg-if",
"fastrand",
"redox_syscall",
- "rustix 0.37.23",
+ "rustix 0.37.25",
"windows-sys",
]
@@ -1590,7 +1590,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
dependencies = [
- "rustix 0.37.23",
+ "rustix 0.37.25",
"windows-sys",
]
diff --git a/third_party/autocxx/engine/src/ast_discoverer.rs b/third_party/autocxx/engine/src/ast_discoverer.rs
index f555419..6be9d8f 100644
--- a/third_party/autocxx/engine/src/ast_discoverer.rs
+++ b/third_party/autocxx/engine/src/ast_discoverer.rs
@@ -672,7 +672,7 @@
}
};
discoveries.search_item(&itm, None).unwrap();
- assert!(discoveries.extern_rust_funs.get(0).unwrap().sig.ident == "bar");
+ assert!(discoveries.extern_rust_funs.first().unwrap().sig.ident == "bar");
}
#[test]
@@ -686,7 +686,7 @@
}
};
discoveries.search_item(&itm, None).unwrap();
- assert!(discoveries.extern_rust_funs.get(0).unwrap().sig.ident == "bar");
+ assert!(discoveries.extern_rust_funs.first().unwrap().sig.ident == "bar");
}
#[test]
@@ -702,7 +702,7 @@
assert!(
discoveries
.extern_rust_types
- .get(0)
+ .first()
.unwrap()
.get_final_ident()
== "Bar"
diff --git a/third_party/autocxx/engine/src/conversion/analysis/allocators.rs b/third_party/autocxx/engine/src/conversion/analysis/allocators.rs
index da25a77..aab637c 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/allocators.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/allocators.rs
@@ -101,11 +101,11 @@
}
pub(crate) fn get_alloc_name(ty_name: &QualifiedName) -> QualifiedName {
- get_name(ty_name, "alloc")
+ get_name(ty_name, "autocxx_alloc")
}
pub(crate) fn get_free_name(ty_name: &QualifiedName) -> QualifiedName {
- get_name(ty_name, "free")
+ get_name(ty_name, "autocxx_free")
}
fn get_name(ty_name: &QualifiedName, label: &str) -> QualifiedName {
diff --git a/third_party/autocxx/engine/src/conversion/analysis/depth_first.rs b/third_party/autocxx/engine/src/conversion/analysis/depth_first.rs
index baadbd7..3c6241b 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/depth_first.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/depth_first.rs
@@ -44,7 +44,7 @@
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
- let first_candidate = self.queue.get(0).map(|api| api.name());
+ let first_candidate = self.queue.front().map(|api| api.name());
while let Some(candidate) = self.queue.pop_front() {
if !candidate
.field_and_base_deps()
@@ -54,7 +54,7 @@
return Some(candidate);
}
self.queue.push_back(candidate);
- if self.queue.get(0).map(|api| api.name()) == first_candidate {
+ if self.queue.front().map(|api| api.name()) == first_candidate {
panic!(
"Failed to find a candidate; there must be a circular dependency. Queue is {}",
self.queue
diff --git a/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs b/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
index 415e40a..dc953ad 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
@@ -80,7 +80,7 @@
#[derive(Clone, Debug)]
pub(crate) enum MethodKind {
- Normal(ReceiverMutability),
+ Normal,
Constructor { is_default: bool },
Static,
Virtual(ReceiverMutability),
@@ -964,7 +964,7 @@
let receiver_mutability =
receiver_mutability.expect("Failed to find receiver details");
match fun.virtualness {
- Virtualness::None => MethodKind::Normal(receiver_mutability),
+ Virtualness::None => MethodKind::Normal,
Virtualness::Virtual => MethodKind::Virtual(receiver_mutability),
Virtualness::PureVirtual => MethodKind::PureVirtual(receiver_mutability),
}
@@ -1152,7 +1152,7 @@
ref impl_for,
method_kind:
MethodKind::Constructor { .. }
- | MethodKind::Normal(..)
+ | MethodKind::Normal
| MethodKind::PureVirtual(..)
| MethodKind::Virtual(..),
..
diff --git a/third_party/autocxx/engine/src/conversion/analysis/pod/byvalue_checker.rs b/third_party/autocxx/engine/src/conversion/analysis/pod/byvalue_checker.rs
index 6e2e9b9..d0c828e 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/pod/byvalue_checker.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/pod/byvalue_checker.rs
@@ -158,6 +158,12 @@
));
break;
}
+ None if ty_id.get_final_item() == "__BindgenBitfieldUnit" => {
+ field_safety_problem = PodState::UnsafeToBePod(format!(
+ "Type {tyname} could not be POD because it is a bitfield"
+ ));
+ break;
+ }
None => {
field_safety_problem = PodState::UnsafeToBePod(format!(
"Type {tyname} could not be POD because its dependent type {ty_id} isn't known"
diff --git a/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs b/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
index 63f7ae9..99cd61d 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
@@ -251,7 +251,7 @@
let i = make_ident(s);
parse_quote! { #i }
})
- .chain(typ.path.segments.into_iter())
+ .chain(typ.path.segments)
.collect();
}
}
diff --git a/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs b/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
index abab612..ee56169 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
@@ -1130,6 +1130,7 @@
let segs =
Self::find_output_mod_root(name.get_namespace()).chain(name.get_bindgen_path_idents());
Item::Use(parse_quote! {
+ #[allow(unused_imports)]
pub use #(#segs)::*;
})
}
diff --git a/third_party/autocxx/engine/src/conversion/mod.rs b/third_party/autocxx/engine/src/conversion/mod.rs
index 61d7d6d..fa24d4d 100644
--- a/third_party/autocxx/engine/src/conversion/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/mod.rs
@@ -114,7 +114,7 @@
None => Err(ConvertError::NoContent),
Some((_, items)) => {
// Parse the bindgen mod.
- let items_to_process = items.drain(..).collect();
+ let items_to_process = std::mem::take(items);
let parser = ParseBindgen::new(self.config);
let apis = parser.parse_items(items_to_process, source_file_contents)?;
Self::dump_apis("parsing", &apis);
diff --git a/third_party/autocxx/engine/src/known_types.rs b/third_party/autocxx/engine/src/known_types.rs
index 10199fb..573be4e 100644
--- a/third_party/autocxx/engine/src/known_types.rs
+++ b/third_party/autocxx/engine/src/known_types.rs
@@ -12,7 +12,7 @@
use once_cell::sync::OnceCell;
use syn::{parse_quote, TypePath};
-//// The behavior of the type.
+/// The behavior of the type.
#[derive(Debug)]
enum Behavior {
CxxContainerPtr,
diff --git a/third_party/autocxx/engine/src/parse_file.rs b/third_party/autocxx/engine/src/parse_file.rs
index 16ea625..9dd620c 100644
--- a/third_party/autocxx/engine/src/parse_file.rs
+++ b/third_party/autocxx/engine/src/parse_file.rs
@@ -21,7 +21,7 @@
use std::{io::Read, path::PathBuf};
use std::{panic::UnwindSafe, path::Path, rc::Rc};
use syn::spanned::Spanned;
-use syn::{token::Brace, Item, ItemMod};
+use syn::Item;
use thiserror::Error;
/// Errors which may occur when parsing a Rust source file to discover
@@ -119,7 +119,7 @@
Segment::Cxx(CxxBridge::from(itm))
}
Item::Mod(itm) => {
- if let Some((brace, items)) = itm.content {
+ if let Some((_, items)) = itm.content {
let mut mod_state = State {
auto_allowlist: self.auto_allowlist,
..Default::default()
@@ -137,18 +137,9 @@
}
self.extra_superclasses.extend(mod_state.extra_superclasses);
self.discoveries.extend(mod_state.discoveries);
- Segment::Mod(
- mod_state.results,
- (
- brace,
- ItemMod {
- content: None,
- ..itm
- },
- ),
- )
+ Segment::Mod(mod_state.results)
} else {
- Segment::Other(Item::Mod(itm))
+ Segment::Other
}
}
Item::Struct(ref its) => {
@@ -189,13 +180,13 @@
self.discoveries
.search_item(&item, mod_path)
.map_err(ParseError::Discovery)?;
- Segment::Other(item)
+ Segment::Other
}
_ => {
self.discoveries
.search_item(&item, mod_path)
.map_err(ParseError::Discovery)?;
- Segment::Other(item)
+ Segment::Other
}
};
self.results.push(result);
@@ -283,8 +274,8 @@
enum Segment {
Autocxx(IncludeCppEngine),
Cxx(CxxBridge),
- Mod(Vec<Segment>, (Brace, ItemMod)),
- Other(Item),
+ Mod(Vec<Segment>),
+ Other,
}
pub trait CppBuildable {
@@ -303,7 +294,7 @@
.flat_map(|s| -> Box<dyn Iterator<Item = &IncludeCppEngine>> {
match s {
Segment::Autocxx(includecpp) => Box::new(std::iter::once(includecpp)),
- Segment::Mod(segments, _) => Box::new(do_get_autocxxes(segments)),
+ Segment::Mod(segments) => Box::new(do_get_autocxxes(segments)),
_ => Box::new(std::iter::empty()),
}
})
@@ -331,7 +322,7 @@
Segment::Cxx(cxxbridge) => {
Box::new(std::iter::once(cxxbridge as &dyn CppBuildable))
}
- Segment::Mod(segments, _) => Box::new(do_get_cpp_buildables(segments)),
+ Segment::Mod(segments) => Box::new(do_get_cpp_buildables(segments)),
_ => Box::new(std::iter::empty()),
}
})
@@ -349,7 +340,7 @@
.flat_map(|s| -> Box<dyn Iterator<Item = &mut IncludeCppEngine>> {
match s {
Segment::Autocxx(includecpp) => Box::new(std::iter::once(includecpp)),
- Segment::Mod(segments, _) => Box::new(do_get_autocxxes_mut(segments)),
+ Segment::Mod(segments) => Box::new(do_get_autocxxes_mut(segments)),
_ => Box::new(std::iter::empty()),
}
})
@@ -368,7 +359,7 @@
.flat_map(|s| -> Box<dyn Iterator<Item = &PathBuf>> {
match s {
Segment::Autocxx(includecpp) => Box::new(includecpp.include_dirs()),
- Segment::Mod(segments, _) => Box::new(do_get_include_dirs(segments)),
+ Segment::Mod(segments) => Box::new(do_get_include_dirs(segments)),
_ => Box::new(std::iter::empty()),
}
})
diff --git a/third_party/autocxx/engine/src/types.rs b/third_party/autocxx/engine/src/types.rs
index f7eaae0..3afbf9d 100644
--- a/third_party/autocxx/engine/src/types.rs
+++ b/third_party/autocxx/engine/src/types.rs
@@ -236,6 +236,10 @@
/// cxx.
#[derive(Error, Clone, Debug)]
pub enum InvalidIdentError {
+ #[error("Union are not supported by autocxx (and their bindgen names have __ so are not acceptable to cxx)")]
+ Union,
+ #[error("Bitfields are not supported by autocxx (and their bindgen names have __ so are not acceptable to cxx)")]
+ Bitfield,
#[error("Names containing __ are reserved by C++ so not acceptable to cxx")]
TooManyUnderscores,
#[error("bindgen decided to call this type _bindgen_ty_N because it couldn't deduce the correct name for it. That means we can't generate C++ bindings to it.")]
@@ -251,7 +255,12 @@
/// where code will be output as part of the `#[cxx::bridge]` mod.
pub fn validate_ident_ok_for_cxx(id: &str) -> Result<(), InvalidIdentError> {
validate_ident_ok_for_rust(id)?;
- if id.contains("__") {
+ // Provide a couple of more specific diagnostics if we can.
+ if id.starts_with("__BindgenBitfieldUnit") {
+ Err(InvalidIdentError::Bitfield)
+ } else if id.starts_with("__BindgenUnionField") {
+ Err(InvalidIdentError::Union)
+ } else if id.contains("__") {
Err(InvalidIdentError::TooManyUnderscores)
} else if id.starts_with("_bindgen_ty_") {
Err(InvalidIdentError::BindgenTy)
diff --git a/third_party/autocxx/integration-tests/src/lib.rs b/third_party/autocxx/integration-tests/src/lib.rs
index 2335ee5..02d672f 100644
--- a/third_party/autocxx/integration-tests/src/lib.rs
+++ b/third_party/autocxx/integration-tests/src/lib.rs
@@ -459,7 +459,7 @@
let generated_rs_files = build_results.1;
if let Some(code_checker) = &rust_code_checker {
- let mut file = File::open(generated_rs_files.get(0).ok_or(TestError::NoRs)?)
+ let mut file = File::open(generated_rs_files.first().ok_or(TestError::NoRs)?)
.map_err(TestError::RsFileOpen)?;
let mut content = String::new();
file.read_to_string(&mut content)
diff --git a/third_party/autocxx/integration-tests/tests/cpprefs_test.rs b/third_party/autocxx/integration-tests/tests/cpprefs_test.rs
index 9cc6d39..820e67a 100644
--- a/third_party/autocxx/integration-tests/tests/cpprefs_test.rs
+++ b/third_party/autocxx/integration-tests/tests/cpprefs_test.rs
@@ -52,6 +52,7 @@
indoc! {"
#include <string>
#include <sstream>
+ #include <cstdint>
class Goat {
public:
@@ -80,6 +81,7 @@
indoc! {"
#include <string>
#include <sstream>
+ #include <cstdint>
class Goat {
public:
diff --git a/third_party/autocxx/integration-tests/tests/integration_test.rs b/third_party/autocxx/integration-tests/tests/integration_test.rs
index ec2b1e7..0939cae 100644
--- a/third_party/autocxx/integration-tests/tests/integration_test.rs
+++ b/third_party/autocxx/integration-tests/tests/integration_test.rs
@@ -4490,6 +4490,7 @@
fn test_typedef_to_std() {
let hdr = indoc! {"
#include <string>
+ #include <cstdint>
typedef std::string my_string;
inline uint32_t take_str(my_string a) {
return a.size();
@@ -4523,6 +4524,7 @@
fn test_typedef_in_pod_struct() {
let hdr = indoc! {"
#include <string>
+ #include <cstdint>
typedef uint32_t my_int;
struct A {
my_int a;
@@ -4544,6 +4546,7 @@
fn test_cint_in_pod_struct() {
let hdr = indoc! {"
#include <string>
+ #include <cstdint>
struct A {
int a;
};
@@ -4613,6 +4616,7 @@
fn test_typedef_to_std_in_struct() {
let hdr = indoc! {"
#include <string>
+ #include <cstdint>
typedef std::string my_string;
struct A {
my_string a;
@@ -4998,6 +5002,43 @@
}
#[test]
+fn test_take_struct_built_array_in_function() {
+ let hdr = indoc! {"
+ #include <cstdint>
+ struct data {
+ char a[4];
+ };
+ uint32_t take_array(char a[4]) {
+ return a[0] + a[2];
+ }
+ "};
+ let rs = quote! {
+ let mut c = ffi::data { a: [ 10, 20, 30, 40 ] };
+ unsafe {
+ assert_eq!(ffi::take_array(c.a.as_mut_ptr()), 40);
+ }
+ };
+ run_test("", hdr, rs, &["take_array"], &["data"]);
+}
+
+#[test]
+fn test_take_array_in_function() {
+ let hdr = indoc! {"
+ #include <cstdint>
+ uint32_t take_array(char a[4]) {
+ return a[0] + a[2];
+ }
+ "};
+ let rs = quote! {
+ let mut a: [i8; 4] = [ 10, 20, 30, 40 ];
+ unsafe {
+ assert_eq!(ffi::take_array(a.as_mut_ptr()), 40);
+ }
+ };
+ run_test("", hdr, rs, &["take_array"], &[]);
+}
+
+#[test]
fn test_union_ignored() {
let hdr = indoc! {"
#include <cstdint>
@@ -7772,6 +7813,58 @@
}
#[test]
+fn test_pv_subclass_opaque_param() {
+ let hdr = indoc! {"
+ #include <cstdint>
+
+ typedef uint32_t MyUnsupportedType[4];
+
+ struct MySupportedType {
+ uint32_t a;
+ };
+
+ class MySuperType {
+ public:
+ virtual void foo(const MyUnsupportedType* foo, const MySupportedType* bar) const = 0;
+ virtual ~MySuperType() = default;
+ };
+ "};
+ run_test_ex(
+ "",
+ hdr,
+ quote! {
+ MySubType::new_rust_owned(MySubType { a: 3, cpp_peer: Default::default() });
+ },
+ quote! {
+ subclass!("MySuperType",MySubType)
+ extern_cpp_opaque_type!("MyUnsupportedType", crate::ffi2::MyUnsupportedType)
+ },
+ None,
+ None,
+ Some(quote! {
+
+ #[cxx::bridge]
+ pub mod ffi2 {
+ unsafe extern "C++" {
+ include!("input.h");
+ type MyUnsupportedType;
+ }
+ }
+ use autocxx::subclass::CppSubclass;
+ use ffi::MySuperType_methods;
+ #[autocxx::subclass::subclass]
+ pub struct MySubType {
+ a: u32
+ }
+ impl MySuperType_methods for MySubType {
+ unsafe fn foo(&self, _foo: *const ffi2::MyUnsupportedType, _bar: *const ffi::MySupportedType) {
+ }
+ }
+ }),
+ );
+}
+
+#[test]
fn test_pv_subclass_return() {
let hdr = indoc! {"
#include <cstdint>
@@ -12241,6 +12334,58 @@
run_test("", hdr, rs, &["A"], &[]);
}
+#[test]
+fn test_badly_named_alloc() {
+ let hdr = indoc! {"
+ #include <stdarg.h>
+ class A {
+ public:
+ void alloc();
+ };
+ "};
+ let rs = quote! {};
+ run_test("", hdr, rs, &["A"], &[]);
+}
+
+#[test]
+fn test_cpp_union_pod() {
+ let hdr = indoc! {"
+ typedef unsigned long long UInt64_t;
+ struct ManagedPtr_t_;
+ typedef struct ManagedPtr_t_ ManagedPtr_t;
+
+ typedef int (*ManagedPtr_ManagerFunction_t)(
+ ManagedPtr_t *managedPtr,
+ const ManagedPtr_t *srcPtr,
+ int operation);
+
+ typedef union {
+ int intValue;
+ void *ptr;
+ } ManagedPtr_t_data_;
+
+ struct ManagedPtr_t_ {
+ void *pointer;
+ ManagedPtr_t_data_ userData[4];
+ ManagedPtr_ManagerFunction_t manager;
+ };
+
+ typedef struct CorrelationId_t_ {
+ unsigned int size : 8;
+ unsigned int valueType : 4;
+ unsigned int classId : 16;
+ unsigned int reserved : 4;
+
+ union {
+ UInt64_t intValue;
+ ManagedPtr_t ptrValue;
+ } value;
+ } CorrelationId_t;
+ "};
+ run_test("", hdr, quote! {}, &["CorrelationId_t_"], &[]);
+ run_test_expect_fail("", hdr, quote! {}, &[], &["CorrelationId_t_"]);
+}
+
// Yet to test:
// - Ifdef
// - Out param pointers
diff --git a/third_party/flatbuffers/rust/flatbuffers/src/builder.rs b/third_party/flatbuffers/rust/flatbuffers/src/builder.rs
index 7d0f408..a6e6818 100644
--- a/third_party/flatbuffers/rust/flatbuffers/src/builder.rs
+++ b/third_party/flatbuffers/rust/flatbuffers/src/builder.rs
@@ -17,8 +17,11 @@
#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
+use core::convert::Infallible;
+use core::fmt::{Debug, Display};
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
+use core::ops::{Add, AddAssign, Deref, DerefMut, Index, IndexMut, Sub, SubAssign};
use core::ptr::write_bytes;
use crate::endian_scalar::emplace_scalar;
@@ -30,6 +33,90 @@
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;
+/// Trait to implement custom allocation strategies for [`FlatBufferBuilder`].
+///
+/// An implementation can be used with [`FlatBufferBuilder::new_in`], enabling a custom allocation
+/// strategy for the [`FlatBufferBuilder`].
+///
+/// # Safety
+///
+/// The implementation of the allocator must match the defined behavior as described by the
+/// comments.
+pub unsafe trait Allocator: DerefMut<Target = [u8]> {
+ /// A type describing allocation failures
+ type Error: Display + Debug;
+ /// Grows the buffer, with the old contents being moved to the end.
+ ///
+ /// NOTE: While not unsound, an implementation that doesn't grow the
+ /// internal buffer will get stuck in an infinite loop.
+ fn grow_downwards(&mut self) -> Result<(), Self::Error>;
+
+ /// Returns the size of the internal buffer in bytes.
+ fn len(&self) -> usize;
+}
+
+/// Default [`FlatBufferBuilder`] allocator backed by a [`Vec<u8>`].
+#[derive(Default)]
+pub struct DefaultAllocator(Vec<u8>);
+
+impl DefaultAllocator {
+ /// Builds the allocator from an existing buffer.
+ pub fn from_vec(buffer: Vec<u8>) -> Self {
+ Self(buffer)
+ }
+}
+
+impl Deref for DefaultAllocator {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for DefaultAllocator {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+// SAFETY: The methods are implemented as described by the documentation.
+unsafe impl Allocator for DefaultAllocator {
+ type Error = Infallible;
+ fn grow_downwards(&mut self) -> Result<(), Self::Error> {
+ let old_len = self.0.len();
+ let new_len = max(1, old_len * 2);
+
+ self.0.resize(new_len, 0);
+
+ if new_len == 1 {
+ return Ok(());
+ }
+
+ // calculate the midpoint, and safely copy the old end data to the new
+ // end position:
+ let middle = new_len / 2;
+ {
+ let (left, right) = &mut self.0[..].split_at_mut(middle);
+ right.copy_from_slice(left);
+ }
+ // finally, zero out the old end data.
+ {
+ let ptr = self.0[..middle].as_mut_ptr();
+ // Safety:
+ // ptr is byte aligned and of length middle
+ unsafe {
+ write_bytes(ptr, 0, middle);
+ }
+ }
+ Ok(())
+ }
+
+ fn len(&self) -> usize {
+ self.0.len()
+ }
+}
+
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
off: UOffsetT,
@@ -40,9 +127,9 @@
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct FlatBufferBuilder<'fbb> {
- owned_buf: Vec<u8>,
- head: usize,
+pub struct FlatBufferBuilder<'fbb, A: Allocator = DefaultAllocator> {
+ allocator: A,
+ head: ReverseIndex,
field_locs: Vec<FieldLoc>,
written_vtable_revpos: Vec<UOffsetT>,
@@ -57,7 +144,7 @@
_phantom: PhantomData<&'fbb ()>,
}
-impl<'fbb> FlatBufferBuilder<'fbb> {
+impl<'fbb> FlatBufferBuilder<'fbb, DefaultAllocator> {
/// Create a FlatBufferBuilder that is ready for writing.
pub fn new() -> Self {
Self::with_capacity(0)
@@ -77,14 +164,29 @@
/// an existing vector.
pub fn from_vec(buffer: Vec<u8>) -> Self {
// we need to check the size here because we create the backing buffer
- // directly, bypassing the typical way of using grow_owned_buf:
+ // directly, bypassing the typical way of using grow_allocator:
assert!(
buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
- let head = buffer.len();
+ let allocator = DefaultAllocator::from_vec(buffer);
+ Self::new_in(allocator)
+ }
+
+ /// Destroy the FlatBufferBuilder, returning its internal byte vector
+ /// and the index into it that represents the start of valid data.
+ pub fn collapse(self) -> (Vec<u8>, usize) {
+ let index = self.head.to_forward_index(&self.allocator);
+ (self.allocator.0, index)
+ }
+}
+
+impl<'fbb, A: Allocator> FlatBufferBuilder<'fbb, A> {
+ /// Create a [`FlatBufferBuilder`] that is ready for writing with a custom [`Allocator`].
+ pub fn new_in(allocator: A) -> Self {
+ let head = ReverseIndex::end();
FlatBufferBuilder {
- owned_buf: buffer,
+ allocator,
head,
field_locs: Vec::new(),
@@ -101,6 +203,13 @@
}
}
+ /// Destroy the [`FlatBufferBuilder`], returning its [`Allocator`] and the index
+ /// into it that represents the start of valid data.
+ pub fn collapse_in(self) -> (A, usize) {
+ let index = self.head.to_forward_index(&self.allocator);
+ (self.allocator, index)
+ }
+
/// Reset the FlatBufferBuilder internal state. Use this method after a
/// call to a `finish` function in order to re-use a FlatBufferBuilder.
///
@@ -114,17 +223,11 @@
/// new object.
pub fn reset(&mut self) {
// memset only the part of the buffer that could be dirty:
- {
- let to_clear = self.owned_buf.len() - self.head;
- let ptr = self.owned_buf[self.head..].as_mut_ptr();
- // Safety:
- // Verified ptr is valid for `to_clear` above
- unsafe {
- write_bytes(ptr, 0, to_clear);
- }
- }
+ self.allocator[self.head.range_to_end()]
+ .iter_mut()
+ .for_each(|x| *x = 0);
- self.head = self.owned_buf.len();
+ self.head = ReverseIndex::end();
self.written_vtable_revpos.clear();
self.nested = false;
@@ -134,12 +237,6 @@
self.strings_pool.clear();
}
- /// Destroy the FlatBufferBuilder, returning its internal byte vector
- /// and the index into it that represents the start of valid data.
- pub fn collapse(self) -> (Vec<u8>, usize) {
- (self.owned_buf, self.head)
- }
-
/// Push a Push'able value onto the front of the in-progress data.
///
/// This function uses traits to provide a unified API for writing
@@ -150,7 +247,7 @@
self.align(sz, P::alignment());
self.make_space(sz);
{
- let (dst, rest) = self.owned_buf[self.head..].split_at_mut(sz);
+ let (dst, rest) = self.allocator[self.head.range_to_end()].split_at_mut(sz);
// Safety:
// Called make_space above
unsafe { x.push(dst, rest.len()) };
@@ -254,9 +351,9 @@
"create_shared_string can not be called when a table or vector is under construction",
);
- // Saves a ref to owned_buf since rust doesnt like us refrencing it
+ // Saves a ref to allocator since rust doesn't like us referencing it
// in the binary_search_by code.
- let buf = &self.owned_buf;
+ let buf = &self.allocator;
let found = self.strings_pool.binary_search_by(|offset| {
let ptr = offset.value() as usize;
@@ -324,9 +421,9 @@
self.ensure_capacity(slice_size + UOffsetT::size());
self.head -= slice_size;
- let mut written_len = self.owned_buf.len() - self.head;
+ let mut written_len = self.head.distance_to_end();
- let buf = &mut self.owned_buf[self.head..self.head + slice_size];
+ let buf = &mut self.allocator[self.head.range_to(self.head + slice_size)];
for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
written_len -= elem_size;
@@ -373,7 +470,7 @@
/// whether it has been finished.
#[inline]
pub fn unfinished_data(&self) -> &[u8] {
- &self.owned_buf[self.head..]
+ &self.allocator[self.head.range_to_end()]
}
/// Get the byte slice for the data that has been written after a call to
/// one of the `finish` functions.
@@ -382,7 +479,7 @@
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
- &self.owned_buf[self.head..]
+ &self.allocator[self.head.range_to_end()]
}
/// Returns a mutable view of a finished buffer and location of where the flatbuffer starts.
/// Note that modifying the flatbuffer data may corrupt it.
@@ -390,7 +487,8 @@
/// Panics if the flatbuffer is not finished.
#[inline]
pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
- (&mut self.owned_buf, self.head)
+ let index = self.head.to_forward_index(&self.allocator);
+ (&mut self.allocator[..], index)
}
/// Assert that a field is present in the just-finished Table.
///
@@ -405,13 +503,13 @@
let idx = self.used_space() - tab_revloc.value() as usize;
// Safety:
- // The value of TableFinishedWIPOffset is the offset from the end of owned_buf
+ // The value of TableFinishedWIPOffset is the offset from the end of the allocator
// to an SOffsetT pointing to a valid VTable
//
- // `self.owned_buf.len() = self.used_space() + self.head`
- // `self.owned_buf.len() - tab_revloc = self.used_space() - tab_revloc + self.head`
- // `self.owned_buf.len() - tab_revloc = idx + self.head`
- let tab = unsafe { Table::new(&self.owned_buf[self.head..], idx) };
+ // `self.allocator.len() = self.used_space() + self.head`
+ // `self.allocator.len() - tab_revloc = self.used_space() - tab_revloc + self.head`
+ // `self.allocator.len() - tab_revloc = idx + self.head`
+ let tab = unsafe { Table::new(&self.allocator[self.head.range_to_end()], idx) };
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o != 0, "missing required field {}", assert_msg_name);
}
@@ -444,7 +542,7 @@
#[inline]
fn used_space(&self) -> usize {
- self.owned_buf.len() - self.head as usize
+ self.head.distance_to_end()
}
#[inline]
@@ -517,7 +615,8 @@
let vt_end_pos = self.head + vtable_byte_len;
{
// write the vtable header:
- let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
+ let vtfw =
+ &mut VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]);
vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
vtfw.write_object_inline_size(table_object_size as VOffsetT);
@@ -527,20 +626,20 @@
vtfw.write_field_offset(fl.id, pos);
}
}
- let new_vt_bytes = &self.owned_buf[vt_start_pos..vt_end_pos];
+ let new_vt_bytes = &self.allocator[vt_start_pos.range_to(vt_end_pos)];
let found = self
.written_vtable_revpos
.binary_search_by(|old_vtable_revpos: &UOffsetT| {
- let old_vtable_pos = self.owned_buf.len() - *old_vtable_revpos as usize;
+ let old_vtable_pos = self.allocator.len() - *old_vtable_revpos as usize;
// Safety:
// Already written vtables are valid by construction
- let old_vtable = unsafe { VTable::init(&self.owned_buf, old_vtable_pos) };
+ let old_vtable = unsafe { VTable::init(&self.allocator, old_vtable_pos) };
new_vt_bytes.cmp(old_vtable.as_bytes())
});
let final_vtable_revpos = match found {
Ok(i) => {
// The new vtable is a duplicate so clear it.
- VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
+ VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]).clear();
self.head += vtable_byte_len;
self.written_vtable_revpos[i]
}
@@ -552,17 +651,17 @@
}
};
// Write signed offset from table to its vtable.
- let table_pos = self.owned_buf.len() - object_revloc_to_vtable.value() as usize;
+ let table_pos = self.allocator.len() - object_revloc_to_vtable.value() as usize;
if cfg!(debug_assertions) {
// Safety:
// Verified slice length
let tmp_soffset_to_vt = unsafe {
- read_scalar::<UOffsetT>(&self.owned_buf[table_pos..table_pos + SIZE_UOFFSET])
+ read_scalar::<UOffsetT>(&self.allocator[table_pos..table_pos + SIZE_UOFFSET])
};
assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
}
- let buf = &mut self.owned_buf[table_pos..table_pos + SIZE_SOFFSET];
+ let buf = &mut self.allocator[table_pos..table_pos + SIZE_SOFFSET];
// Safety:
// Verified length of buf above
unsafe {
@@ -579,39 +678,14 @@
// Only call this when you know it is safe to double the size of the buffer.
#[inline]
- fn grow_owned_buf(&mut self) {
- let old_len = self.owned_buf.len();
- let new_len = max(1, old_len * 2);
-
+ fn grow_allocator(&mut self) {
let starting_active_size = self.used_space();
-
- let diff = new_len - old_len;
- self.owned_buf.resize(new_len, 0);
- self.head += diff;
+ self.allocator
+ .grow_downwards()
+ .expect("Flatbuffer allocation failure");
let ending_active_size = self.used_space();
debug_assert_eq!(starting_active_size, ending_active_size);
-
- if new_len == 1 {
- return;
- }
-
- // calculate the midpoint, and safely copy the old end data to the new
- // end position:
- let middle = new_len / 2;
- {
- let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
- right.copy_from_slice(left);
- }
- // finally, zero out the old end data.
- {
- let ptr = self.owned_buf[..middle].as_mut_ptr();
- // Safety:
- // ptr is byte aligned and of length middle
- unsafe {
- write_bytes(ptr, 0, middle);
- }
- }
}
// with or without a size prefix changes how we load the data, so finish*
@@ -676,13 +750,13 @@
#[inline]
fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
let n = self.make_space(x.len());
- self.owned_buf[n..n + x.len()].copy_from_slice(x);
+ self.allocator[n.range_to(n + x.len())].copy_from_slice(x);
- n as UOffsetT
+ n.to_forward_index(&self.allocator) as UOffsetT
}
#[inline]
- fn make_space(&mut self, want: usize) -> usize {
+ fn make_space(&mut self, want: usize) -> ReverseIndex {
self.ensure_capacity(want);
self.head -= want;
self.head
@@ -699,13 +773,13 @@
);
while self.unused_ready_space() < want {
- self.grow_owned_buf();
+ self.grow_allocator();
}
want
}
#[inline]
fn unused_ready_space(&self) -> usize {
- self.head
+ self.allocator.len() - self.head.distance_to_end()
}
#[inline]
fn assert_nested(&self, fn_name: &'static str) {
@@ -754,3 +828,127 @@
Self::with_capacity(0)
}
}
+
+/// An index that indexes from the reverse of a slice.
+///
+/// Note that while the internal representation is an index
+/// from the end of a buffer, operations like `Add` and `Sub`
+/// behave like a regular index:
+///
+/// # Examples
+///
+/// ```ignore
+/// let buf = [0, 1, 2, 3, 4, 5];
+/// let idx = ReverseIndex::end() - 2;
+/// assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
+/// assert_eq!(idx.to_forward_index(&buf), 4);
+/// ```
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+struct ReverseIndex(usize);
+
+impl ReverseIndex {
+ /// Returns an index set to the end.
+ ///
+ /// Note: Indexing this will result in an out of bounds error.
+ pub fn end() -> Self {
+ Self(0)
+ }
+
+ /// Returns a struct equivalent to the range `self..`
+ pub fn range_to_end(self) -> ReverseIndexRange {
+ ReverseIndexRange(self, ReverseIndex::end())
+ }
+
+ /// Returns a struct equivalent to the range `self..end`
+ pub fn range_to(self, end: ReverseIndex) -> ReverseIndexRange {
+ ReverseIndexRange(self, end)
+ }
+
+ /// Transforms this reverse index into a regular index for the given buffer.
+ pub fn to_forward_index<T>(self, buf: &[T]) -> usize {
+ buf.len() - self.0
+ }
+
+ /// Returns the number of elements until the end of the range.
+ pub fn distance_to_end(&self) -> usize {
+ self.0
+ }
+}
+
+impl Sub<usize> for ReverseIndex {
+ type Output = Self;
+
+ fn sub(self, rhs: usize) -> Self::Output {
+ Self(self.0 + rhs)
+ }
+}
+
+impl SubAssign<usize> for ReverseIndex {
+ fn sub_assign(&mut self, rhs: usize) {
+ *self = *self - rhs;
+ }
+}
+
+impl Add<usize> for ReverseIndex {
+ type Output = Self;
+
+ fn add(self, rhs: usize) -> Self::Output {
+ Self(self.0 - rhs)
+ }
+}
+
+impl AddAssign<usize> for ReverseIndex {
+ fn add_assign(&mut self, rhs: usize) {
+ *self = *self + rhs;
+ }
+}
+impl<T> Index<ReverseIndex> for [T] {
+ type Output = T;
+
+ fn index(&self, index: ReverseIndex) -> &Self::Output {
+ let index = index.to_forward_index(self);
+ &self[index]
+ }
+}
+
+impl<T> IndexMut<ReverseIndex> for [T] {
+ fn index_mut(&mut self, index: ReverseIndex) -> &mut Self::Output {
+ let index = index.to_forward_index(self);
+ &mut self[index]
+ }
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+struct ReverseIndexRange(ReverseIndex, ReverseIndex);
+
+impl<T> Index<ReverseIndexRange> for [T] {
+ type Output = [T];
+
+ fn index(&self, index: ReverseIndexRange) -> &Self::Output {
+ let start = index.0.to_forward_index(self);
+ let end = index.1.to_forward_index(self);
+ &self[start..end]
+ }
+}
+
+impl<T> IndexMut<ReverseIndexRange> for [T] {
+ fn index_mut(&mut self, index: ReverseIndexRange) -> &mut Self::Output {
+ let start = index.0.to_forward_index(self);
+ let end = index.1.to_forward_index(self);
+ &mut self[start..end]
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn reverse_index_test() {
+ let buf = [0, 1, 2, 3, 4, 5];
+ let idx = ReverseIndex::end() - 2;
+ assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
+ assert_eq!(&buf[idx.range_to(idx + 1)], &[4]);
+ assert_eq!(idx.to_forward_index(&buf), 4);
+ }
+}
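As a usage sketch of the new `Allocator` trait (illustrative, not part of this change): a `Vec`-backed allocator that enforces a hard size cap and is handed to `FlatBufferBuilder::new_in`. The `CappedAllocator` name and the cap policy are assumptions; the grow logic mirrors `DefaultAllocator::grow_downwards` above.

```rust
use core::fmt;
use core::ops::{Deref, DerefMut};
use flatbuffers::{Allocator, FlatBufferBuilder};

/// Error reported when the builder would outgrow the configured cap.
#[derive(Debug)]
struct CapExceeded;

impl fmt::Display for CapExceeded {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "flatbuffer exceeded the configured size cap")
    }
}

/// Vec-backed allocator that refuses to grow past `cap` bytes.
struct CappedAllocator {
    buf: Vec<u8>,
    cap: usize,
}

impl Deref for CappedAllocator {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.buf
    }
}

impl DerefMut for CappedAllocator {
    fn deref_mut(&mut self) -> &mut [u8] {
        &mut self.buf
    }
}

// SAFETY: grow_downwards doubles the buffer and moves the previously written
// bytes (which live at the end) to the end of the new buffer, matching the
// documented contract of the trait.
unsafe impl Allocator for CappedAllocator {
    type Error = CapExceeded;

    fn grow_downwards(&mut self) -> Result<(), Self::Error> {
        let old_len = self.buf.len();
        let new_len = core::cmp::max(1, old_len * 2);
        if new_len > self.cap {
            return Err(CapExceeded);
        }
        self.buf.resize(new_len, 0);
        if new_len == 1 {
            return Ok(());
        }
        // Copy the old contents into the upper half and zero the lower half,
        // mirroring DefaultAllocator::grow_downwards.
        let middle = new_len / 2;
        let (left, right) = self.buf.split_at_mut(middle);
        right.copy_from_slice(left);
        left.fill(0);
        Ok(())
    }

    fn len(&self) -> usize {
        self.buf.len()
    }
}

// Usage: any code generic over `A: Allocator` accepts this builder.
fn capped_builder(cap: usize) -> FlatBufferBuilder<'static, CappedAllocator> {
    FlatBufferBuilder::new_in(CappedAllocator { buf: Vec::new(), cap })
}
```

With this allocator, exceeding the cap surfaces as a panic in `grow_allocator` ("Flatbuffer allocation failure"), since the builder unwraps the error with `expect`.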
diff --git a/third_party/flatbuffers/rust/flatbuffers/src/lib.rs b/third_party/flatbuffers/rust/flatbuffers/src/lib.rs
index 2741811..324dc1a 100644
--- a/third_party/flatbuffers/rust/flatbuffers/src/lib.rs
+++ b/third_party/flatbuffers/rust/flatbuffers/src/lib.rs
@@ -48,7 +48,7 @@
mod vtable_writer;
pub use crate::array::{array_init, emplace_scalar_array, Array};
-pub use crate::builder::FlatBufferBuilder;
+pub use crate::builder::{Allocator, DefaultAllocator, FlatBufferBuilder};
pub use crate::endian_scalar::{emplace_scalar, read_scalar, read_scalar_at, EndianScalar};
pub use crate::follow::{Follow, FollowStart, FollowWith};
pub use crate::primitives::*;
diff --git a/third_party/flatbuffers/src/idl_gen_rust.cpp b/third_party/flatbuffers/src/idl_gen_rust.cpp
index 43237b2..824f33e 100644
--- a/third_party/flatbuffers/src/idl_gen_rust.cpp
+++ b/third_party/flatbuffers/src/idl_gen_rust.cpp
@@ -975,7 +975,8 @@
code_ += " }";
// Pack flatbuffers union value
code_ +=
- " pub fn pack(&self, fbb: &mut flatbuffers::FlatBufferBuilder)"
+ " pub fn pack<'b, A: flatbuffers::Allocator + 'b>(&self, fbb: &mut "
+ "flatbuffers::FlatBufferBuilder<'b, A>)"
" -> Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>"
" {";
code_ += " match self {";
@@ -1704,8 +1705,11 @@
code_.SetValue("MAYBE_LT",
TableBuilderArgsNeedsLifetime(struct_def) ? "<'args>" : "");
code_ += " #[allow(unused_mut)]";
- code_ += " pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(";
- code_ += " _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,";
+ code_ +=
+ " pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: "
+ "flatbuffers::Allocator + 'bldr>(";
+ code_ +=
+ " _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,";
code_ += " {{MAYBE_US}}args: &'args {{STRUCT_TY}}Args{{MAYBE_LT}}";
code_ += " ) -> flatbuffers::WIPOffset<{{STRUCT_TY}}<'bldr>> {";
@@ -2097,15 +2101,20 @@
}
// Generate a builder struct:
- code_ += "{{ACCESS_TYPE}} struct {{STRUCT_TY}}Builder<'a: 'b, 'b> {";
- code_ += " fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,";
+ code_ +=
+ "{{ACCESS_TYPE}} struct {{STRUCT_TY}}Builder<'a: 'b, 'b, A: "
+ "flatbuffers::Allocator + 'a> {";
+ code_ += " fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,";
code_ +=
" start_: flatbuffers::WIPOffset<"
"flatbuffers::TableUnfinishedWIPOffset>,";
code_ += "}";
// Generate builder functions:
- code_ += "impl<'a: 'b, 'b> {{STRUCT_TY}}Builder<'a, 'b> {";
+ code_ +=
+ "impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> "
+ "{{STRUCT_TY}}Builder<'a, "
+ "'b, A> {";
ForAllTableFields(struct_def, [&](const FieldDef &field) {
const bool is_scalar = IsScalar(field.value.type.base_type);
std::string offset = namer_.LegacyRustFieldOffsetName(field);
@@ -2140,8 +2149,8 @@
// Struct initializer (all fields required);
code_ += " #[inline]";
code_ +=
- " pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> "
- "{{STRUCT_TY}}Builder<'a, 'b> {";
+ " pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> "
+ "{{STRUCT_TY}}Builder<'a, 'b, A> {";
code_.SetValue("NUM_FIELDS", NumToString(struct_def.fields.vec.size()));
code_ += " let start = _fbb.start_table();";
code_ += " {{STRUCT_TY}}Builder {";
@@ -2244,9 +2253,9 @@
// Generate pack function.
code_ += "impl {{STRUCT_OTY}} {";
- code_ += " pub fn pack<'b>(";
+ code_ += " pub fn pack<'b, A: flatbuffers::Allocator + 'b>(";
code_ += " &self,";
- code_ += " _fbb: &mut flatbuffers::FlatBufferBuilder<'b>";
+ code_ += " _fbb: &mut flatbuffers::FlatBufferBuilder<'b, A>";
code_ += " ) -> flatbuffers::WIPOffset<{{STRUCT_TY}}<'b>> {";
// First we generate variables for each field and then later assemble them
// using "StructArgs" to more easily manage ownership of the builder.
@@ -2529,8 +2538,10 @@
// Finish a buffer with a given root object:
code_ += "#[inline]";
- code_ += "pub fn finish_{{STRUCT_FN}}_buffer<'a, 'b>(";
- code_ += " fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,";
+ code_ +=
+ "pub fn finish_{{STRUCT_FN}}_buffer<'a, 'b, A: "
+ "flatbuffers::Allocator + 'a>(";
+ code_ += " fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,";
code_ += " root: flatbuffers::WIPOffset<{{STRUCT_TY}}<'a>>) {";
if (parser_.file_identifier_.length()) {
code_ += " fbb.finish(root, Some({{STRUCT_CONST}}_IDENTIFIER));";
@@ -2542,8 +2553,8 @@
code_ += "#[inline]";
code_ +=
"pub fn finish_size_prefixed_{{STRUCT_FN}}_buffer"
- "<'a, 'b>("
- "fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, "
+ "<'a, 'b, A: flatbuffers::Allocator + 'a>("
+ "fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, "
"root: flatbuffers::WIPOffset<{{STRUCT_TY}}<'a>>) {";
if (parser_.file_identifier_.length()) {
code_ +=
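A brief usage sketch of the regenerated code (illustrative, not part of this diff): because the generated builder types are now parameterized over `A: flatbuffers::Allocator`, helper code can stay generic over the allocator. The `ping_rust_fbs` import path and the `i32` value type are assumptions; `PingBuilder::new`, `add_value`, and `finish` match the generated API already used in `ping.rs` above.

```rust
use flatbuffers::{Allocator, FlatBufferBuilder};

// Generated flatbuffers module for the Ping table; import path assumed.
use ping_rust_fbs::aos::examples as ping;

/// Builds a Ping into any allocator-backed builder and finishes the buffer.
fn build_ping<'fbb, A: Allocator + 'fbb>(fbb: &mut FlatBufferBuilder<'fbb, A>, value: i32) {
    let mut ping = ping::PingBuilder::new(fbb);
    ping.add_value(value);
    let ping = ping.finish();
    fbb.finish(ping, None);
}
```

The same function accepts both a builder from `FlatBufferBuilder::new()` (backed by `DefaultAllocator`) and one created with `FlatBufferBuilder::new_in` and a custom allocator.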