
wip: close on putting it all together

just gotta tie a bow
Jonathan Kelley 3 years ago
parent
commit
85e2dc2

+ 1 - 2
README.md

@@ -166,7 +166,7 @@ Dioxus is heavily inspired by React, but we want your transition to feel like an
 | Custom elements           | ✅      | ✅     | Define new element primitives                                        |
 | Suspense                  | ✅      | ✅     | schedule future render from future/promise                           |
 | Integrated error handling | ✅      | ✅     | Gracefully handle errors with ? syntax                               |
-| NodeRef                   | ✅      | ✅     | gain direct access to nodes [1]                                      |
+| NodeRef                   | ✅      | ✅     | gain direct access to nodes                                          |
 | Re-hydration              | ✅      | ✅     | Pre-render to HTML to speed up first contentful paint                |
 | Jank-Free Rendering       | ✅      | ✅     | Large diffs are segmented across frames for silky-smooth transitions |
 | Cooperative Scheduling    | ✅      | ✅     | Prioritize important events over non-important events                |
@@ -178,7 +178,6 @@ Dioxus is heavily inspired by React, but we want your transition to feel like an
 | Fine-grained reactivity   | 🛠      | ❓     | Skip diffing for fine-grain updates                                  |
 | Effects                   | 🛠      | ✅     | Run effects after a component has been committed to render           |
 
-- [1] Currently blocked until we figure out a cross-platform way of exposing an imperative Node API.
 
 ### Phase 2: Advanced Toolkits
 

+ 40 - 20
packages/core/src/arena.rs

@@ -4,6 +4,7 @@ use std::{cell::UnsafeCell, rc::Rc};
 
 use crate::heuristics::*;
 use crate::innerlude::*;
+use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
 use futures_util::stream::FuturesUnordered;
 use fxhash::{FxHashMap, FxHashSet};
 use slab::Slab;
@@ -33,8 +34,11 @@ impl ElementId {
 }
 
 type Shared<T> = Rc<RefCell<T>>;
-type TaskReceiver = futures_channel::mpsc::UnboundedReceiver<EventTrigger>;
-type TaskSender = futures_channel::mpsc::UnboundedSender<EventTrigger>;
+type UiReceiver = UnboundedReceiver<EventTrigger>;
+type UiSender = UnboundedSender<EventTrigger>;
+
+type TaskReceiver = UnboundedReceiver<ScopeId>;
+type TaskSender = UnboundedSender<ScopeId>;
 
 /// These are resources shared among all the components and the virtualdom itself
 #[derive(Clone)]
@@ -43,16 +47,23 @@ pub struct SharedResources {
 
     pub(crate) heuristics: Shared<HeuristicsEngine>,
 
-    ///
-    pub task_sender: TaskSender,
+    // Used by "set_state" and co - is its own queue
+    pub immediate_sender: TaskSender,
+    pub immediate_receiver: Shared<TaskReceiver>,
+
+    /// Triggered by event listeners
+    pub ui_event_sender: UiSender,
+    pub ui_event_receiver: Shared<UiReceiver>,
 
-    pub task_receiver: Shared<TaskReceiver>,
+    // Garbage stored
+    pub pending_garbage: Shared<FxHashSet<ScopeId>>,
 
+    // In-flight futures
     pub async_tasks: Shared<FuturesUnordered<FiberTask>>,
 
     /// We use a SlotSet to keep track of the keys that are currently being used.
     /// However, we don't store any specific data since the "mirror"
-    pub raw_elements: Rc<RefCell<Slab<()>>>,
+    pub raw_elements: Shared<Slab<()>>,
 
     pub task_setter: Rc<dyn Fn(ScopeId)>,
 }
@@ -66,28 +77,30 @@ impl SharedResources {
         // elements are super cheap - the value takes no space
         let raw_elements = Slab::with_capacity(2000);
 
-        let (sender, receiver) = futures_channel::mpsc::unbounded();
+        let (ui_sender, ui_receiver) = futures_channel::mpsc::unbounded();
+        let (immediate_sender, immediate_receiver) = futures_channel::mpsc::unbounded();
 
         let heuristics = HeuristicsEngine::new();
 
         // we allocate this task setter once to save us from having to allocate later
         let task_setter = {
-            let queue = sender.clone();
+            let queue = immediate_sender.clone();
             let components = components.clone();
             Rc::new(move |idx: ScopeId| {
                 let comps = unsafe { &*components.get() };
 
                 if let Some(scope) = comps.get(idx.0) {
-                    queue
-                        .unbounded_send(EventTrigger::new(
-                            VirtualEvent::ScheduledUpdate {
-                                height: scope.height,
-                            },
-                            idx,
-                            None,
-                            EventPriority::High,
-                        ))
-                        .expect("The event queu receiver should *never* be dropped");
+                    todo!("implement immediates again")
+                    // queue
+                    //     .unbounded_send(EventTrigger::new(
+                    //         VirtualEvent::ScheduledUpdate {
+                    //             height: scope.height,
+                    //         },
+                    //         idx,
+                    //         None,
+                    //         EventPriority::High,
+                    //     ))
+                    //     .expect("The event queu receiver should *never* be dropped");
                 }
             }) as Rc<dyn Fn(ScopeId)>
         };
@@ -95,8 +108,15 @@ impl SharedResources {
         Self {
             components,
             async_tasks: Rc::new(RefCell::new(FuturesUnordered::new())),
-            task_receiver: Rc::new(RefCell::new(receiver)),
-            task_sender: sender,
+
+            ui_event_receiver: Rc::new(RefCell::new(ui_receiver)),
+            ui_event_sender: ui_sender,
+
+            immediate_receiver: Rc::new(RefCell::new(immediate_receiver)),
+            immediate_sender: immediate_sender,
+
+            pending_garbage: Rc::new(RefCell::new(FxHashSet::default())),
+
             heuristics: Rc::new(RefCell::new(heuristics)),
             raw_elements: Rc::new(RefCell::new(raw_elements)),
             task_setter,

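The change above splits the old task_sender/task_receiver pair into two queues: UI events carry an EventTrigger, while set_state-style immediates carry only the ScopeId that needs a re-render. A minimal standalone sketch of that layout, assuming simplified stand-ins for ScopeId and EventTrigger (the real types live in dioxus-core):

use futures_channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};

// Hypothetical stand-ins for the real dioxus-core types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ScopeId(usize);

#[derive(Debug)]
struct EventTrigger {
    originator: ScopeId,
}

struct SharedQueues {
    // "immediates" produced by set_state: just the scope that needs a re-render
    immediate_sender: UnboundedSender<ScopeId>,
    immediate_receiver: UnboundedReceiver<ScopeId>,
    // raw UI events produced by listeners
    ui_event_sender: UnboundedSender<EventTrigger>,
    ui_event_receiver: UnboundedReceiver<EventTrigger>,
}

impl SharedQueues {
    fn new() -> Self {
        let (immediate_sender, immediate_receiver) = unbounded();
        let (ui_event_sender, ui_event_receiver) = unbounded();
        Self {
            immediate_sender,
            immediate_receiver,
            ui_event_sender,
            ui_event_receiver,
        }
    }
}

fn main() {
    let mut queues = SharedQueues::new();

    // a listener fires an event; a set_state marks the same scope dirty
    queues
        .ui_event_sender
        .unbounded_send(EventTrigger { originator: ScopeId(0) })
        .unwrap();
    queues.immediate_sender.unbounded_send(ScopeId(0)).unwrap();

    // drain each queue without blocking, the way work_with_deadline does
    while let Ok(Some(trigger)) = queues.ui_event_receiver.try_next() {
        println!("ui event from {:?}", trigger.originator);
    }
    while let Ok(Some(scope)) = queues.immediate_receiver.try_next() {
        println!("scope {:?} marked dirty", scope);
    }
}

Draining with try_next keeps the work loop non-blocking, which is what the reworked work_with_deadline further down relies on.
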
+ 20 - 32
packages/core/src/events.rs

@@ -58,32 +58,40 @@ impl Ord for EventKey {
 /// implement this form of scheduling internally, however Dioxus will perform its own scheduling as well.
 ///
 /// The ultimate goal of the scheduler is to manage latency of changes, prioritizing "flashier" changes over "subtler" changes.
+///
+/// React has a 5-tier priority system. However, they break things into "Continuous" and "Discrete" priority. For now,
+/// we keep it simple, and just use a 3-tier priority system.
+///
+/// - NoPriority = 0
+/// - LowPriority = 1
+/// - NormalPriority = 2
+/// - UserBlocking = 3
+/// - HighPriority = 4
+/// - ImmediatePriority = 5
 #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
 pub enum EventPriority {
-    /// Garbage collection is a type of work than can be scheduled around other work, but must be completed in a specific
-    /// order. The GC must be run for a component before any other future work for that component is run. Otherwise,
-    /// we will leak slots in our slab.
-    ///
-    /// Garbage collection mixes with the safety aspects of the virtualdom so it's very important to get it done before
-    /// other work.
-    GarbageCollection,
-
     /// "High Priority" work will not interrupt other high priority work, but will interrupt long medium and low priority work.
     ///
     /// This is typically reserved for things like user interaction.
-    High,
+    ///
+    /// React calls these "discrete" events, but with an extra category of "user-blocking".
+    High = 2,
 
     /// "Medium priority" work is generated by page events not triggered by the user. These types of events are less important
     /// than "High Priority" events and will take presedence over low priority events.
     ///
     /// This is typically reserved for VirtualEvents that are not related to keyboard or mouse input.
-    Medium,
+    ///
+    /// React calls these "continuous" events (e.g. mouse move, mouse wheel, touch move, etc).
+    Medium = 1,
 
     /// "Low Priority" work will always be pre-empted unless the work is significantly delayed, in which case it will be
     /// advanced to the front of the work queue until completed.
     ///
     /// The primary user of Low Priority work is the asynchronous work system (suspense).
-    Low,
+    ///
+    /// This is considered "idle" work or "background" work.
+    Low = 0,
 }
 
 impl EventTrigger {
@@ -103,21 +111,6 @@ impl EventTrigger {
 }
 
 pub enum VirtualEvent {
-    /// Generated during diffing to signal that a component's nodes to be given back
-    ///
-    /// Typically has a high priority
-    ///
-    /// If an event is scheduled for a component that has "garbage", that garabge will be cleaned up before the event can
-    /// be processed.
-    GarbageCollection,
-
-    /// A type of "immediate" event scheduled by components
-    ///
-    /// Usually called through "set_state"
-    ScheduledUpdate {
-        height: u32,
-    },
-
     // Whenever a task is ready (complete) Dioxus produces this "AsyncEvent"
     //
     // Async events don't necessarily propagate into a scope being ran. It's up to the event itself
@@ -179,10 +172,7 @@ impl VirtualEvent {
             | VirtualEvent::MouseEvent(_)
             | VirtualEvent::PointerEvent(_) => true,
 
-            VirtualEvent::GarbageCollection
-            | VirtualEvent::ScheduledUpdate { .. }
-            | VirtualEvent::AsyncEvent { .. }
-            | VirtualEvent::SuspenseEvent { .. } => false,
+            VirtualEvent::AsyncEvent { .. } | VirtualEvent::SuspenseEvent { .. } => false,
         }
     }
 }
@@ -205,8 +195,6 @@ impl std::fmt::Debug for VirtualEvent {
             VirtualEvent::ToggleEvent(_) => "ToggleEvent",
             VirtualEvent::MouseEvent(_) => "MouseEvent",
             VirtualEvent::PointerEvent(_) => "PointerEvent",
-            VirtualEvent::GarbageCollection => "GarbageCollection",
-            VirtualEvent::ScheduledUpdate { .. } => "SetStateEvent",
             VirtualEvent::AsyncEvent { .. } => "AsyncEvent",
             VirtualEvent::SuspenseEvent { .. } => "SuspenseEvent",
         };

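The reworked EventPriority gives each tier an explicit discriminant and keeps the derived PartialOrd/Ord. A small standalone sketch (not part of the commit) of why that matters to a scheduler: derived ordering follows the discriminants, so High outranks Medium and Low even though it is declared first, and a priority queue pops the "flashiest" work first.

use std::collections::BinaryHeap;

// Mirrors the shape of the new enum: explicit discriminants, derived ordering.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
enum EventPriority {
    High = 2,
    Medium = 1,
    Low = 0,
}

fn main() {
    // Derived Ord compares the discriminants, so High ranks above Medium and Low
    // even though it is declared first.
    assert!(EventPriority::High > EventPriority::Medium);
    assert!(EventPriority::Medium > EventPriority::Low);

    // A max-heap keyed on priority hands back high-priority work first.
    let mut queue = BinaryHeap::new();
    queue.push((EventPriority::Low, "suspense resolution"));
    queue.push((EventPriority::High, "click handler"));
    queue.push((EventPriority::Medium, "mouse move"));

    let (priority, label) = queue.pop().unwrap();
    assert_eq!((priority, label), (EventPriority::High, "click handler"));
    println!("run {:?} work first: {}", priority, label);
}
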
+ 0 - 12
packages/core/src/hooks.rs

@@ -260,24 +260,12 @@ impl<'src> SuspendedContext<'src> {
 #[derive(Clone, Copy)]
 pub struct NodeRef<'src, T: 'static>(&'src RefCell<T>);
 
-// impl NodeRef<'src, T> {
-// fn set_ref(&self, new: Box<dyn Any>)
-// }
-// impl<'a, T> std::ops::Deref for NodeRef<'a, T> {
-//     type Target = Option<&'a T>;
-
-//     fn deref(&self) -> &Self::Target {
-//         // &self.node
-//     }
-// }
-
 pub fn use_node_ref<T, P>(cx: Context<P>) -> NodeRef<T> {
     cx.use_hook(
         |f| {},
         |f| {
             //
             todo!()
-            // NodeRef {}
         },
         |f| {
             //

+ 2 - 0
packages/core/src/lib.rs

@@ -40,6 +40,7 @@ pub(crate) mod innerlude {
     pub use crate::hooklist::*;
     pub use crate::hooks::*;
     pub use crate::nodes::*;
+    pub use crate::scheduler::*;
     pub use crate::scope::*;
     pub use crate::util::*;
     pub use crate::virtual_dom::*;
@@ -67,6 +68,7 @@ pub mod heuristics;
 pub mod hooklist;
 pub mod hooks;
 pub mod nodes;
+pub mod scheduler;
 pub mod scope;
 pub mod signals;
 pub mod util;

+ 119 - 0
packages/core/src/noderef.rs

@@ -0,0 +1,119 @@
+// let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();
+
+// let mut garbage_list = scope.consume_garbage();
+
+// let mut scopes_to_kill = Vec::new();
+// while let Some(node) = garbage_list.pop() {
+//     match &node.kind {
+//         VNodeKind::Text(_) => {
+//             self.shared.collect_garbage(node.direct_id());
+//         }
+//         VNodeKind::Anchor(_) => {
+//             self.shared.collect_garbage(node.direct_id());
+//         }
+//         VNodeKind::Suspended(_) => {
+//             self.shared.collect_garbage(node.direct_id());
+//         }
+
+//         VNodeKind::Element(el) => {
+//             self.shared.collect_garbage(node.direct_id());
+//             for child in el.children {
+//                 garbage_list.push(child);
+//             }
+//         }
+
+//         VNodeKind::Fragment(frag) => {
+//             for child in frag.children {
+//                 garbage_list.push(child);
+//             }
+//         }
+
+//         VNodeKind::Component(comp) => {
+//             // TODO: run the hook destructors and then even delete the scope
+
+//             let scope_id = comp.ass_scope.get().unwrap();
+//             let scope = self.get_scope(scope_id).unwrap();
+//             let root = scope.root();
+//             garbage_list.push(root);
+//             scopes_to_kill.push(scope_id);
+//         }
+//     }
+// }
+
+// for scope in scopes_to_kill {
+//     // oy kill em
+//     log::debug!("should be removing scope {:#?}", scope);
+// }
+
+// // On the primary event queue, there is no batching, we take them off one-by-one
+// let trigger = match receiver.try_next() {
+//     Ok(Some(trigger)) => trigger,
+//     _ => {
+//         // Continuously poll the future pool and the event receiver for work
+//         let mut tasks = self.shared.async_tasks.borrow_mut();
+//         let tasks_tasks = tasks.next();
+
+//         // if the new event generates work more important than our current fiber, we should consider switching
+//         // only switch if it impacts different scopes.
+//         let mut ui_receiver = self.shared.ui_event_receiver.borrow_mut();
+//         let ui_reciv_task = ui_receiver.next();
+
+//         // right now, this polling method will only catch batched set_states that don't get awaited.
+//         // However, in the future, we might be interested in batching set_states across await points
+//         let immediate_tasks = ();
+
+//         futures_util::pin_mut!(tasks_tasks);
+//         futures_util::pin_mut!(ui_reciv_task);
+
+//         // Poll the event receiver and the future pool for work
+//         // Abort early if our deadline has ran out
+//         let mut deadline = (&mut deadline_future).fuse();
+
+//         let trig = futures_util::select! {
+//             trigger = tasks_tasks => trigger,
+//             trigger = ui_reciv_task => trigger,
+
+//             // abort if we're out of time
+//             _ = deadline => { return Ok(diff_machine.mutations); }
+//         };
+
+//         trig.unwrap()
+//     }
+// };
+
+// async fn select_next_event(&mut self) -> Option<EventTrigger> {
+//     let mut receiver = self.shared.task_receiver.borrow_mut();
+
+//     // drain the in-flight events so that we can sort them out with the current events
+//     while let Ok(Some(trigger)) = receiver.try_next() {
+//         log::info!("retrieving event from receiver");
+//         let key = self.shared.make_trigger_key(&trigger);
+//         self.pending_events.insert(key, trigger);
+//     }
+
+//     if self.pending_events.is_empty() {
+//         // Continuously poll the future pool and the event receiver for work
+//         let mut tasks = self.shared.async_tasks.borrow_mut();
+//         let tasks_tasks = tasks.next();
+
+//         let mut receiver = self.shared.task_receiver.borrow_mut();
+//         let reciv_task = receiver.next();
+
+//         futures_util::pin_mut!(tasks_tasks);
+//         futures_util::pin_mut!(reciv_task);
+
+//         let trigger = match futures_util::future::select(tasks_tasks, reciv_task).await {
+//             futures_util::future::Either::Left((trigger, _)) => trigger,
+//             futures_util::future::Either::Right((trigger, _)) => trigger,
+//         }
+//         .unwrap();
+//         let key = self.shared.make_trigger_key(&trigger);
+//         self.pending_events.insert(key, trigger);
+//     }
+
+//     // pop the most important event off
+//     let key = self.pending_events.keys().next().unwrap().clone();
+//     let trigger = self.pending_events.remove(&key).unwrap();
+
+//     Some(trigger)
+// }

+ 116 - 0
packages/core/src/scheduler.rs

@@ -0,0 +1,116 @@
+use std::any::Any;
+
+use std::any::TypeId;
+use std::cell::{Ref, RefCell, RefMut};
+use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
+use std::pin::Pin;
+
+use crate::innerlude::*;
+
+/// The "Mutations" object holds the changes that need to be made to the DOM.
+pub struct Mutations<'s> {
+    pub edits: Vec<DomEdit<'s>>,
+    pub noderefs: Vec<NodeRefMutation<'s>>,
+}
+
+impl<'s> Mutations<'s> {
+    pub fn new() -> Self {
+        let edits = Vec::new();
+        let noderefs = Vec::new();
+        Self { edits, noderefs }
+    }
+}
+
+// refs are only assigned once
+pub struct NodeRefMutation<'a> {
+    element: &'a mut Option<once_cell::sync::OnceCell<Box<dyn Any>>>,
+    element_id: ElementId,
+}
+
+impl<'a> NodeRefMutation<'a> {
+    pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
+        self.element
+            .as_ref()
+            .and_then(|f| f.get())
+            .and_then(|f| f.downcast_ref::<T>())
+    }
+    pub fn downcast_mut<T: 'static>(&mut self) -> Option<&mut T> {
+        self.element
+            .as_mut()
+            .and_then(|f| f.get_mut())
+            .and_then(|f| f.downcast_mut::<T>())
+    }
+}
+
+pub struct Scheduler {
+    current_priority: EventPriority,
+
+    dirty_scopes: [HashSet<DirtyScope>; 3],
+
+    fibers: Vec<Fiber<'static>>,
+}
+
+impl Scheduler {
+    pub fn new() -> Self {
+        Self {
+            fibers: Vec::new(),
+
+            current_priority: EventPriority::Low,
+
+            // low, medium, high
+            dirty_scopes: [HashSet::new(), HashSet::new(), HashSet::new()],
+        }
+    }
+
+    pub fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
+        //
+
+        // generated_immediates
+        //     .entry(dirty_scope)
+        //     .and_modify(|cur_priority| {
+        //         if *cur_priority > new_priority {
+        //             *cur_priority = new_priority;
+        //         }
+        //     })
+        //     .or_insert_with(|| new_priority);
+    }
+
+    pub fn has_work() {}
+
+    pub fn progress_work(&mut self, machine: &mut DiffMachine) {}
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct DirtyScope {
+    height: u32,
+    priority: EventPriority,
+    start_tick: u32,
+}
+
+// fibers in dioxus aren't exactly the same as React's. Our fibers are more like a "saved state" of the diffing algorithm.
+pub struct Fiber<'a> {
+    // scopes that haven't been updated yet
+    pending_scopes: Vec<ScopeId>,
+
+    pending_nodes: Vec<*const VNode<'a>>,
+
+    // WIP edits
+    edits: Vec<DomEdit<'a>>,
+
+    started: bool,
+
+    // a fiber is finished when no more scopes or nodes are pending
+    completed: bool,
+}
+
+impl Fiber<'_> {
+    fn new() -> Self {
+        Self {
+            pending_scopes: Vec::new(),
+            pending_nodes: Vec::new(),
+            edits: Vec::new(),
+            started: false,
+            completed: false,
+        }
+    }
+}

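add_dirty_scope and progress_work are still stubs in this commit. A hedged sketch of how the three dirty_scopes buckets could be filled and drained by priority; the ScopeId stand-in, the locally redeclared EventPriority, and the use of plain scope ids instead of DirtyScope records are assumptions for illustration, not the committed design:

use std::collections::HashSet;

// Redeclared locally so the sketch stands alone.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
enum EventPriority {
    Low = 0,
    Medium = 1,
    High = 2,
}

// Hypothetical stand-in for the real ScopeId.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ScopeId(usize);

struct Scheduler {
    // index 0 = low, 1 = medium, 2 = high
    dirty_scopes: [HashSet<ScopeId>; 3],
}

impl Scheduler {
    fn new() -> Self {
        Self {
            dirty_scopes: [HashSet::new(), HashSet::new(), HashSet::new()],
        }
    }

    fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
        // bucket the scope by the priority's discriminant
        self.dirty_scopes[priority as usize].insert(scope);
    }

    // hand back work from the highest-priority non-empty bucket first
    fn next_dirty_scope(&mut self) -> Option<(EventPriority, ScopeId)> {
        for priority in [EventPriority::High, EventPriority::Medium, EventPriority::Low] {
            let bucket = &mut self.dirty_scopes[priority as usize];
            let candidate = bucket.iter().next().copied();
            if let Some(scope) = candidate {
                bucket.remove(&scope);
                return Some((priority, scope));
            }
        }
        None
    }
}

fn main() {
    let mut scheduler = Scheduler::new();
    scheduler.add_dirty_scope(ScopeId(4), EventPriority::Low);
    scheduler.add_dirty_scope(ScopeId(7), EventPriority::High);

    // the click-driven scope (High) is progressed before the background one (Low)
    assert_eq!(
        scheduler.next_dirty_scope(),
        Some((EventPriority::High, ScopeId(7)))
    );
    assert_eq!(
        scheduler.next_dirty_scope(),
        Some((EventPriority::Low, ScopeId(4)))
    );
    assert_eq!(scheduler.next_dirty_scope(), None);
}

The real DirtyScope record also carries height and start_tick, which would let the scheduler break ties inside a bucket; the sketch ignores that.
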
+ 8 - 35
packages/core/src/scope.rs

@@ -193,51 +193,24 @@ impl Scope {
     }
 
     // A safe wrapper around calling listeners
-    // calling listeners will invalidate the list of listeners
-    // The listener list will be completely drained because the next frame will write over previous listeners
-    pub(crate) fn call_listener(&mut self, trigger: EventTrigger) -> Result<()> {
-        let EventTrigger {
-            real_node_id,
-            event,
-            ..
-        } = trigger;
-
-        if let &VirtualEvent::AsyncEvent { .. } = &event {
-            log::info!("arrived a fiber event");
-            return Ok(());
-        }
-
-        log::debug!(
-            "There are  {:?} listeners associated with this scope {:#?}",
-            self.listeners.borrow().len(),
-            self.our_arena_idx
-        );
-
+    //
+    //
+    pub(crate) fn call_listener(&mut self, event: VirtualEvent, element: ElementId) -> Result<()> {
         let listners = self.listeners.borrow_mut();
 
         let raw_listener = listners.iter().find(|lis| {
             let search = unsafe { &***lis };
             let search_id = search.mounted_node.get();
-            log::info!(
-                "searching listener {:#?} for real {:?}",
-                search_id,
-                real_node_id
-            );
-
-            match (real_node_id, search_id) {
-                (Some(e), Some(search_id)) => search_id == e,
-                _ => false,
+
+            // this assumes the node might not be mounted - should we assume that though?
+            match search_id.map(|f| f == element) {
+                Some(same) => same,
+                None => false,
             }
         });
 
         if let Some(raw_listener) = raw_listener {
             let listener = unsafe { &**raw_listener };
-
-            // log::info!(
-            //     "calling listener {:?}, {:?}",
-            //     listener.event,
-            //     // listener.scope
-            // );
             let mut cb = listener.callback.borrow_mut();
             if let Some(cb) = cb.as_mut() {
                 (cb)(event);

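call_listener now takes the VirtualEvent plus the ElementId of the node that produced it and searches the scope's listener list for a matching mounted node. A simplified, safe sketch of that lookup; the Listener and ElementId shapes here are stand-ins, since the real listeners are bump-allocated and reached through raw pointers:

use std::cell::{Cell, RefCell};

// Hypothetical stand-ins for the real dioxus-core types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ElementId(usize);

struct Listener {
    // None until the renderer has actually mounted the node
    mounted_node: Cell<Option<ElementId>>,
    callback: RefCell<Option<Box<dyn FnMut(&str)>>>,
}

fn call_listener(listeners: &[Listener], event: &str, element: ElementId) {
    // find the listener whose mounted element matches the event's target;
    // unmounted listeners (mounted_node == None) never match
    let hit = listeners
        .iter()
        .find(|listener| listener.mounted_node.get() == Some(element));

    if let Some(listener) = hit {
        if let Some(cb) = listener.callback.borrow_mut().as_mut() {
            cb(event);
        }
    } else {
        eprintln!("no listener mounted at {:?}", element);
    }
}

fn main() {
    let on_click: Box<dyn FnMut(&str)> = Box::new(|event| println!("ran {event} handler"));
    let listeners = vec![Listener {
        mounted_node: Cell::new(Some(ElementId(3))),
        callback: RefCell::new(Some(on_click)),
    }];

    call_listener(&listeners, "onclick", ElementId(3)); // runs the callback
    call_listener(&listeners, "onclick", ElementId(9)); // no match, nothing runs
}
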
+ 98 - 257
packages/core/src/virtual_dom.rs

@@ -18,18 +18,17 @@
 //!
 //! This module includes just the barebones for a complete VirtualDOM API.
 //! Additional functionality is defined in the respective files.
-#![allow(unreachable_code)]
 use futures_util::{Future, StreamExt};
 use fxhash::FxHashMap;
 
 use crate::hooks::{SuspendedContext, SuspenseHook};
-use crate::{arena::SharedResources, innerlude::*};
+use crate::innerlude::*;
 
 use std::any::Any;
 
 use std::any::TypeId;
 use std::cell::{Ref, RefCell, RefMut};
-use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashSet};
+use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
 use std::pin::Pin;
 
 /// An integrated virtual node system that progresses events and diffs UI trees.
@@ -53,7 +52,7 @@ pub struct VirtualDom {
     /// Should always be the first (gen=0, id=0)
     base_scope: ScopeId,
 
-    active_fibers: Vec<Fiber<'static>>,
+    scheduler: Scheduler,
 
     // for managing the props that were used to create the dom
     #[doc(hidden)]
@@ -144,7 +143,7 @@ impl VirtualDom {
             base_scope,
             _root_props: root_props,
             shared: components,
-            active_fibers: Vec::new(),
+            scheduler: Scheduler::new(),
             _root_prop_type: TypeId::of::<P>(),
         }
     }
@@ -218,43 +217,6 @@ impl VirtualDom {
         Ok(edits)
     }
 
-    // async fn select_next_event(&mut self) -> Option<EventTrigger> {
-    //     let mut receiver = self.shared.task_receiver.borrow_mut();
-
-    //     // drain the in-flight events so that we can sort them out with the current events
-    //     while let Ok(Some(trigger)) = receiver.try_next() {
-    //         log::info!("retrieving event from receiver");
-    //         let key = self.shared.make_trigger_key(&trigger);
-    //         self.pending_events.insert(key, trigger);
-    //     }
-
-    //     if self.pending_events.is_empty() {
-    //         // Continuously poll the future pool and the event receiver for work
-    //         let mut tasks = self.shared.async_tasks.borrow_mut();
-    //         let tasks_tasks = tasks.next();
-
-    //         let mut receiver = self.shared.task_receiver.borrow_mut();
-    //         let reciv_task = receiver.next();
-
-    //         futures_util::pin_mut!(tasks_tasks);
-    //         futures_util::pin_mut!(reciv_task);
-
-    //         let trigger = match futures_util::future::select(tasks_tasks, reciv_task).await {
-    //             futures_util::future::Either::Left((trigger, _)) => trigger,
-    //             futures_util::future::Either::Right((trigger, _)) => trigger,
-    //         }
-    //         .unwrap();
-    //         let key = self.shared.make_trigger_key(&trigger);
-    //         self.pending_events.insert(key, trigger);
-    //     }
-
-    //     // pop the most important event off
-    //     let key = self.pending_events.keys().next().unwrap().clone();
-    //     let trigger = self.pending_events.remove(&key).unwrap();
-
-    //     Some(trigger)
-    // }
-
     /// Runs the virtualdom immediately, not waiting for any suspended nodes to complete.
     ///
     /// This method will not wait for any suspended tasks, completely skipping over
@@ -320,24 +282,38 @@ impl VirtualDom {
         &'s mut self,
         mut deadline: impl Future<Output = ()>,
     ) -> Result<Mutations<'s>> {
-        // Configure our deadline
-        use futures_util::FutureExt;
-        let mut deadline_future = deadline.boxed_local();
+        /*
+        Strategy:
+        1. Check if there are any UI events in the receiver.
+        2. If there are, run the listener and then mark the dirty nodes
+        3. If there are dirty nodes to be progressed, do so.
+        4. Poll the task queue to see if we can create more dirty scopes.
+        5. Resume any current in-flight work if there is some.
+        6. While the deadline is not met, progress work, periodically checking the deadline.
+
+
+        How to choose work:
+        - When a scope is marked as dirty, it is given a priority.
+        - If a dirty scope chains (borrowed) into children, mark those as dirty as well.
+        - When the work loop starts, work on the highest priority scopes first.
+        - Work by priority, choosing to pause in-flight work if higher-priority work is ready.
 
-        let is_ready = || -> bool { (&mut deadline_future).now_or_never().is_some() };
 
-        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.shared);
 
-        /*
-        Strategy:
-        1. Check if there are any events in the receiver.
-        2. If there are, process them and create a new fiber.
-        3. If there are no events, then choose a fiber to work on.
         4. If there are no fibers, then wait for the next event from the receiver. Abort if the deadline is reached.
         5. While processing a fiber, periodically check if we're out of time
         6. If our deadling is reached, then commit our edits to the realdom
         7. Whenever a fiber is finished, immediately commit it. (IE so deadlines can be infinite if unsupported)
 
+
+        // 1. Check if there are any events in the receiver.
+        // 2. If there are, process them and create a new fiber.
+        // 3. If there are no events, then choose a fiber to work on.
+        // 4. If there are no fibers, then wait for the next event from the receiver. Abort if the deadline is reached.
+        // 5. While processing a fiber, periodically check if we're out of time
+        // 6. If our deadling is reached, then commit our edits to the realdom
+        // 7. Whenever a fiber is finished, immediately commit it. (IE so deadlines can be infinite if unsupported)
+
         We slice fibers based on time. Each batch of events between frames is its own fiber. This is the simplest way
         to conceptualize what *is* or *isn't* a fiber. IE if a bunch of events occur during a time slice, they all
         get batched together as a single operation of "dirty" scopes.
@@ -348,101 +324,39 @@ impl VirtualDom {
         and listeners hold references to hook data, it is wrong to run a scope that is already being diffed.
         */
 
-        // 1. Consume any pending events and create new fibers
-        let mut receiver = self.shared.task_receiver.borrow_mut();
-
-        let current_fiber = {
-            //
-            self.active_fibers.get_mut(0).unwrap()
-        };
-
-        // On the primary event queue, there is no batching.
-        let mut trigger = {
-            match receiver.try_next() {
-                Ok(Some(trigger)) => trigger,
-                _ => {
-                    // Continuously poll the future pool and the event receiver for work
-                    let mut tasks = self.shared.async_tasks.borrow_mut();
-                    let tasks_tasks = tasks.next();
-
-                    // if the new event generates work more important than our current fiber, we should consider switching
-                    // only switch if it impacts different scopes.
-                    let mut receiver = self.shared.task_receiver.borrow_mut();
-                    let reciv_task = receiver.next();
-
-                    futures_util::pin_mut!(tasks_tasks);
-                    futures_util::pin_mut!(reciv_task);
-
-                    // Poll the event receiver and the future pool for work
-                    // Abort early if our deadline has ran out
-                    let mut deadline = (&mut deadline_future).fuse();
-
-                    let trig = futures_util::select! {
-                        trigger = tasks_tasks => trigger,
-                        trigger = reciv_task => trigger,
-                        _ = deadline => { return Ok(diff_machine.mutations); }
-                    };
-
-                    trig.unwrap()
-                }
-            }
-        };
-
-        // since the last time we were ran with a deadline, we've accumulated many updates
-        // IE a button was clicked twice, or a scroll trigger was fired twice.
-        // We consider the button a event to be a function of the current state, which means we can batch many updates
-        // together.
-
-        match &trigger.event {
-            // If any input event is received, then we need to create a new fiber
-            VirtualEvent::ClipboardEvent(_)
-            | VirtualEvent::CompositionEvent(_)
-            | VirtualEvent::KeyboardEvent(_)
-            | VirtualEvent::FocusEvent(_)
-            | VirtualEvent::FormEvent(_)
-            | VirtualEvent::SelectionEvent(_)
-            | VirtualEvent::TouchEvent(_)
-            | VirtualEvent::UIEvent(_)
-            | VirtualEvent::WheelEvent(_)
-            | VirtualEvent::MediaEvent(_)
-            | VirtualEvent::AnimationEvent(_)
-            | VirtualEvent::TransitionEvent(_)
-            | VirtualEvent::ToggleEvent(_)
-            | VirtualEvent::MouseEvent(_)
-            | VirtualEvent::PointerEvent(_) => {
-                //
-                if let Some(scope) = self.shared.get_scope_mut(trigger.originator) {
-                    scope.call_listener(trigger)?;
-                }
-            }
+        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.shared);
 
-            VirtualEvent::AsyncEvent { .. } => while let Ok(Some(event)) = receiver.try_next() {},
+        // 1. Drain the existing immediates.
+        //
+        // These are generated by async tasks that we never got a chance to finish.
+        // All of these get scheduled with the lowest priority.
+        while let Ok(Some(dirty_scope)) = self.shared.immediate_receiver.borrow_mut().try_next() {
+            self.scheduler
+                .add_dirty_scope(dirty_scope, EventPriority::Low);
+        }
 
-            // These shouldn't normally be received, but if they are, it's done because some task set state manually
-            // Instead of processing it serially,
-            // We will batch all the scheduled updates together in one go.
-            VirtualEvent::ScheduledUpdate { .. } => {}
+        // 2. Drain the event queue, calling whatever listeners need to be called
+        //
+        while let Ok(Some(trigger)) = self.shared.ui_event_receiver.borrow_mut().try_next() {
+            match &trigger.event {
+                VirtualEvent::AsyncEvent { .. } => {}
 
-            // Suspense Events! A component's suspended node is updated
-            VirtualEvent::SuspenseEvent { hook_idx, domnode } => {
-                // Safety: this handler is the only thing that can mutate shared items at this moment in tim
-                let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();
+                // This suspense system works, but it's not the most elegant solution.
+                // TODO: Replace this system
+                VirtualEvent::SuspenseEvent { hook_idx, domnode } => {
+                    // Safety: this handler is the only thing that can mutate shared items at this moment in tim
+                    let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();
 
-                // safety: we are sure that there are no other references to the inner content of suspense hooks
-                let hook = unsafe { scope.hooks.get_mut::<SuspenseHook>(*hook_idx) }.unwrap();
+                    // safety: we are sure that there are no other references to the inner content of suspense hooks
+                    let hook = unsafe { scope.hooks.get_mut::<SuspenseHook>(*hook_idx) }.unwrap();
 
-                let cx = Context { scope, props: &() };
-                let scx = SuspendedContext { inner: cx };
+                    let cx = Context { scope, props: &() };
+                    let scx = SuspendedContext { inner: cx };
 
-                // generate the new node!
-                let nodes: Option<VNode> = (&hook.callback)(scx);
-                match nodes {
-                    None => {
-                        log::warn!(
-                            "Suspense event came through, but there were no generated nodes >:(."
-                        );
-                    }
-                    Some(nodes) => {
+                    // generate the new node!
+                    let nodes: Option<VNode> = (&hook.callback)(scx);
+
+                    if let Some(nodes) = nodes {
                         // allocate inside the finished frame - not the WIP frame
                         let nodes = scope.frames.finished_frame().bump.alloc(nodes);
 
@@ -455,74 +369,65 @@ impl VirtualDom {
 
                         // replace the placeholder with the new nodes we just pushed on the stack
                         diff_machine.edit_replace_with(1, meta.added_to_stack);
+                    } else {
+                        log::warn!(
+                            "Suspense event came through, but there were no generated nodes >:(."
+                        );
                     }
                 }
-            }
-
-            // Collecting garabge is not currently interruptible.
-            //
-            // In the future, it could be though
-            VirtualEvent::GarbageCollection => {
-                let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();
-
-                let mut garbage_list = scope.consume_garbage();
 
-                let mut scopes_to_kill = Vec::new();
-                while let Some(node) = garbage_list.pop() {
-                    match &node.kind {
-                        VNodeKind::Text(_) => {
-                            self.shared.collect_garbage(node.direct_id());
-                        }
-                        VNodeKind::Anchor(_) => {
-                            self.shared.collect_garbage(node.direct_id());
-                        }
-                        VNodeKind::Suspended(_) => {
-                            self.shared.collect_garbage(node.direct_id());
-                        }
-
-                        VNodeKind::Element(el) => {
-                            self.shared.collect_garbage(node.direct_id());
-                            for child in el.children {
-                                garbage_list.push(child);
-                            }
-                        }
-
-                        VNodeKind::Fragment(frag) => {
-                            for child in frag.children {
-                                garbage_list.push(child);
+                VirtualEvent::ClipboardEvent(_)
+                | VirtualEvent::CompositionEvent(_)
+                | VirtualEvent::KeyboardEvent(_)
+                | VirtualEvent::FocusEvent(_)
+                | VirtualEvent::FormEvent(_)
+                | VirtualEvent::SelectionEvent(_)
+                | VirtualEvent::TouchEvent(_)
+                | VirtualEvent::UIEvent(_)
+                | VirtualEvent::WheelEvent(_)
+                | VirtualEvent::MediaEvent(_)
+                | VirtualEvent::AnimationEvent(_)
+                | VirtualEvent::TransitionEvent(_)
+                | VirtualEvent::ToggleEvent(_)
+                | VirtualEvent::MouseEvent(_)
+                | VirtualEvent::PointerEvent(_) => {
+                    if let Some(scope) = self.shared.get_scope_mut(trigger.originator) {
+                        if let Some(element) = trigger.real_node_id {
+                            scope.call_listener(trigger.event, element)?;
+
+                            // Drain the immediates into the dirty scopes, setting the appropiate priorities
+                            while let Ok(Some(dirty_scope)) =
+                                self.shared.immediate_receiver.borrow_mut().try_next()
+                            {
+                                self.scheduler
+                                    .add_dirty_scope(dirty_scope, trigger.priority)
                             }
                         }
-
-                        VNodeKind::Component(comp) => {
-                            // TODO: run the hook destructors and then even delete the scope
-
-                            let scope_id = comp.ass_scope.get().unwrap();
-                            let scope = self.get_scope(scope_id).unwrap();
-                            let root = scope.root();
-                            garbage_list.push(root);
-                            scopes_to_kill.push(scope_id);
-                        }
                     }
                 }
-
-                for scope in scopes_to_kill {
-                    // oy kill em
-                    log::debug!("should be removing scope {:#?}", scope);
-                }
             }
         }
 
-        // while !deadline() {
-        //     let mut receiver = self.shared.task_receiver.borrow_mut();
+        // 3. Work through the fibers, and wait for any future work to be ready
 
-        //     // no messages to receive, just work on the fiber
-        // }
+        // Configure our deadline
+        use futures_util::FutureExt;
+        let mut deadline_future = deadline.boxed_local();
+        let mut is_ready = || -> bool { (&mut deadline_future).now_or_never().is_some() };
+
+        loop {
+            if is_ready() {
+                break;
+            }
+
+            self.scheduler
+        }
 
         Ok(diff_machine.mutations)
     }
 
     pub fn get_event_sender(&self) -> futures_channel::mpsc::UnboundedSender<EventTrigger> {
-        self.shared.task_sender.clone()
+        self.shared.ui_event_sender.clone()
     }
 
     fn get_scope_mut(&mut self, id: ScopeId) -> Option<&mut Scope> {
@@ -534,67 +439,3 @@ impl VirtualDom {
 // These impls are actually wrong. The DOM needs to have a mutex implemented.
 unsafe impl Sync for VirtualDom {}
 unsafe impl Send for VirtualDom {}
-
-struct Fiber<'a> {
-    // scopes that haven't been updated yet
-    pending_scopes: Vec<ScopeId>,
-
-    pending_nodes: Vec<*const VNode<'a>>,
-
-    // WIP edits
-    edits: Vec<DomEdit<'a>>,
-
-    started: bool,
-
-    completed: bool,
-}
-
-impl Fiber<'_> {
-    fn new() -> Self {
-        Self {
-            pending_scopes: Vec::new(),
-            pending_nodes: Vec::new(),
-            edits: Vec::new(),
-            started: false,
-            completed: false,
-        }
-    }
-}
-
-/// The "Mutations" object holds the changes that need to be made to the DOM.
-pub struct Mutations<'s> {
-    // todo: apply node refs
-    // todo: apply effects
-    pub edits: Vec<DomEdit<'s>>,
-
-    pub noderefs: Vec<NodeRefMutation<'s>>,
-}
-
-impl<'s> Mutations<'s> {
-    pub fn new() -> Self {
-        let edits = Vec::new();
-        let noderefs = Vec::new();
-        Self { edits, noderefs }
-    }
-}
-
-// refs are only assigned once
-pub struct NodeRefMutation<'a> {
-    element: &'a mut Option<once_cell::sync::OnceCell<Box<dyn Any>>>,
-    element_id: ElementId,
-}
-
-impl<'a> NodeRefMutation<'a> {
-    pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
-        self.element
-            .as_ref()
-            .and_then(|f| f.get())
-            .and_then(|f| f.downcast_ref::<T>())
-    }
-    pub fn downcast_mut<T: 'static>(&mut self) -> Option<&mut T> {
-        self.element
-            .as_mut()
-            .and_then(|f| f.get_mut())
-            .and_then(|f| f.downcast_mut::<T>())
-    }
-}
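
The new tail of work_with_deadline boxes the deadline future and polls it with now_or_never between units of work (the loop body itself is still a stub in this commit). A standalone sketch of that pattern, using a oneshot channel as a stand-in for the renderer's deadline future:

use futures_channel::oneshot;
use futures_util::FutureExt;

fn main() {
    // The renderer would hand work_with_deadline a future that resolves when the
    // frame budget is spent; a oneshot channel stands in for it here.
    let (frame_budget_spent, mut deadline) = oneshot::channel::<()>();
    let mut frame_budget_spent = Some(frame_budget_spent);

    // Poll the deadline without blocking between units of work.
    let mut is_ready = || (&mut deadline).now_or_never().is_some();

    let mut completed_units = 0;
    for unit in 0..100 {
        if is_ready() {
            println!("deadline hit after {} units, committing edits", completed_units);
            break;
        }

        // pretend to progress one dirty scope through the diff machine
        completed_units += 1;

        // simulate the end of the idle period arriving partway through
        if unit == 5 {
            if let Some(tx) = frame_budget_spent.take() {
                tx.send(()).unwrap();
            }
        }
    }
}

Because now_or_never polls exactly once with a no-op waker, the loop never blocks; when the host gives an unlimited deadline the future simply never resolves and every fiber is committed as soon as it finishes.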