
wip: algo for scheduler finished

Jonathan Kelley · 3 years ago · commit 9ad5e49

+ 3 - 3
packages/core/src/childiter.rs

@@ -5,7 +5,7 @@ use crate::innerlude::*;
 /// This iterator is useful when it's important to load the next real root onto the top of the stack for operations like
 /// "InsertBefore".
 pub struct RealChildIterator<'a> {
-    scopes: &'a Scheduler,
+    scopes: &'a ResourcePool,
 
     // Heuristically we should never bleed into 4 completely nested fragments/components
     // Smallvec lets us stack-allocate our little stack machine so the vast majority of cases are sane
@@ -14,14 +14,14 @@ pub struct RealChildIterator<'a> {
 }
 
 impl<'a> RealChildIterator<'a> {
-    pub fn new(starter: &'a VNode<'a>, scopes: &'a Scheduler) -> Self {
+    pub fn new(starter: &'a VNode<'a>, scopes: &'a ResourcePool) -> Self {
         Self {
             scopes,
             stack: smallvec::smallvec![(0, starter)],
         }
     }
 
-    pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a Scheduler) -> Self {
+    pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a ResourcePool) -> Self {
         let mut stack = smallvec::smallvec![];
         for node in nodes {
             stack.push((0, node));

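Only the handle type changes in this file (Scheduler becomes ResourcePool), but the doc comment is worth unpacking: the iterator is a small depth-first stack machine that walks past fragment/component wrappers to surface the next "real" root for operations like InsertBefore. Below is a minimal, self-contained sketch of that pattern; the Node enum and real_children function are illustrative stand-ins, not Dioxus's VNode or RealChildIterator API, and the real iterator keeps its stack in a SmallVec so typical trees never touch the heap.

```rust
// Standalone sketch of the "real child" walk, with a hypothetical `Node` enum
// standing in for VNode. The real iterator also tracks a child index per stack
// entry so it can resume mid-parent; this version simply pushes children.
enum Node {
    Element(&'static str),   // a "real" root the renderer can target
    Fragment(Vec<Node>),     // transparent wrapper: descend into its children
}

/// Depth-first walk that yields only real (element) roots, skipping fragments.
fn real_children(roots: &[Node]) -> Vec<&'static str> {
    let mut found = Vec::new();
    let mut stack: Vec<&Node> = roots.iter().rev().collect();
    while let Some(node) = stack.pop() {
        match node {
            Node::Element(name) => found.push(*name),
            Node::Fragment(children) => stack.extend(children.iter().rev()),
        }
    }
    found
}

fn main() {
    let tree = vec![
        Node::Fragment(vec![Node::Element("header"), Node::Element("main")]),
        Node::Element("footer"),
    ];
    // -> ["header", "main", "footer"]
    println!("{:?}", real_children(&tree));
}
```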
+ 14 - 39
packages/core/src/diff.rs

@@ -106,48 +106,20 @@ use DomEdit::*;
 /// Funnily enough, this stack machine's entire job is to create instructions for another stack machine to execute. It's
 /// stack machines all the way down!
 pub struct DiffMachine<'bump> {
-    vdom: SharedVdom<'bump>,
+    pub channel: EventChannel,
+    pub vdom: &'bump ResourcePool,
     pub mutations: Mutations<'bump>,
     pub stack: DiffStack<'bump>,
-    pub diffed: FxHashSet<ScopeId>,
     pub seen_scopes: FxHashSet<ScopeId>,
 }
 
-pub struct SharedVdom<'bump> {
-    pub components: &'bump mut Slab<Scope>,
-    pub elements: &'bump mut Slab<()>,
-    pub channel: EventChannel,
-}
-
-impl<'bump> SharedVdom<'bump> {
-    fn get_scope_mut(&mut self, scope: ScopeId) -> Option<&'bump mut Scope> {
-        todo!()
-    }
-
-    fn get_scope(&mut self, scope: ScopeId) -> Option<&'bump Scope> {
-        todo!()
-    }
-
-    fn reserve_node(&mut self) -> ElementId {
-        todo!()
-    }
-    fn collect_garbage(&mut self, element: ElementId) {}
-
-    pub fn insert_scope_with_key(&mut self, f: impl FnOnce(ScopeId) -> Scope) -> ScopeId {
-        let entry = self.components.vacant_entry();
-        let id = ScopeId(entry.key());
-        entry.insert(f(id));
-        id
-    }
-}
-
 /// a "saved" form of a diff machine
 /// in regular diff machine, the &'bump reference is a stack borrow, but the
 /// bump lifetimes are heap borrows.
 pub struct SavedDiffWork<'bump> {
+    pub channel: EventChannel,
     pub mutations: Mutations<'bump>,
     pub stack: DiffStack<'bump>,
-    pub diffed: FxHashSet<ScopeId>,
     pub seen_scopes: FxHashSet<ScopeId>,
 }
 
@@ -155,34 +127,38 @@ impl<'a> SavedDiffWork<'a> {
     pub unsafe fn extend(self: SavedDiffWork<'a>) -> SavedDiffWork<'static> {
         std::mem::transmute(self)
     }
-    pub unsafe fn promote<'b>(self, vdom: SharedVdom<'b>) -> DiffMachine<'b> {
+    pub unsafe fn promote<'b>(self, vdom: &'b mut ResourcePool) -> DiffMachine<'b> {
         let extended: SavedDiffWork<'b> = std::mem::transmute(self);
         DiffMachine {
             vdom,
+            channel: extended.channel,
             mutations: extended.mutations,
             stack: extended.stack,
-            diffed: extended.diffed,
             seen_scopes: extended.seen_scopes,
         }
     }
 }
 
 impl<'bump> DiffMachine<'bump> {
-    pub(crate) fn new(edits: Mutations<'bump>, shared: SharedVdom<'bump>) -> Self {
+    pub(crate) fn new(
+        edits: Mutations<'bump>,
+        shared: &'bump mut ResourcePool,
+        channel: EventChannel,
+    ) -> Self {
         Self {
+            channel,
             stack: DiffStack::new(),
             mutations: edits,
             vdom: shared,
-            diffed: FxHashSet::default(),
             seen_scopes: FxHashSet::default(),
         }
     }
 
     pub fn save(self) -> SavedDiffWork<'bump> {
         SavedDiffWork {
+            channel: self.channel,
             mutations: self.mutations,
             stack: self.stack,
-            diffed: self.diffed,
             seen_scopes: self.seen_scopes,
         }
     }
@@ -194,11 +170,10 @@ impl<'bump> DiffMachine<'bump> {
     // }
 
     //
-    pub async fn diff_scope(&mut self, id: ScopeId) {
+    pub async fn diff_scope(&'bump mut self, id: ScopeId) {
         if let Some(component) = self.vdom.get_scope_mut(id) {
             let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
             self.stack.push(DiffInstruction::DiffNode { new, old });
-            self.work().await;
         }
     }
 
@@ -368,7 +343,7 @@ impl<'bump> DiffMachine<'bump> {
 
         let parent_idx = self.stack.current_scope().unwrap();
 
-        let shared = self.vdom.channel.clone();
+        let shared = self.channel.clone();
         // Insert a new scope into our component list
         let parent_scope = self.vdom.get_scope(parent_idx).unwrap();
         let new_idx = self.vdom.insert_scope_with_key(|new_idx| {

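The save/promote pair above is the interesting part of this hunk: a DiffMachine borrowing 'bump data is frozen into a SavedDiffWork, transmuted to 'static so a scheduler lane can hold it across polls, and later transmuted back and re-attached to a live &ResourcePool. Here is a stripped-down sketch of that lifetime round trip, using hypothetical Arena/Work/SavedWork types rather than the real ones; the soundness burden is the same as in the real code, namely that the arena must still be alive when the work is promoted.

```rust
// Standalone sketch of the SavedDiffWork pattern: pause work that borrows from
// an arena, stash it as 'static, and resume it later against the same arena.
struct Arena {
    data: Vec<String>,
}

struct Work<'a> {
    arena: &'a Arena,
    pending: Vec<&'a str>, // items still borrowed out of the arena
}

struct SavedWork<'a> {
    pending: Vec<&'a str>,
}

impl<'a> Work<'a> {
    fn save(self) -> SavedWork<'a> {
        SavedWork { pending: self.pending }
    }
}

impl<'a> SavedWork<'a> {
    /// Erase the lifetime so a scheduler lane can store this in a field.
    /// SAFETY: the caller must not use `pending` after the arena is dropped.
    unsafe fn extend(self) -> SavedWork<'static> {
        std::mem::transmute(self)
    }

    /// Re-attach the saved work to the (still-alive) arena.
    /// SAFETY: `arena` must be the allocation the work was saved from.
    unsafe fn promote<'b>(self, arena: &'b Arena) -> Work<'b> {
        let saved: SavedWork<'b> = std::mem::transmute(self);
        Work { arena, pending: saved.pending }
    }
}

fn main() {
    let arena = Arena { data: vec!["hello".into(), "world".into()] };
    let work = Work {
        arena: &arena,
        pending: arena.data.iter().map(|s| s.as_str()).collect(),
    };

    let saved: SavedWork<'static> = unsafe { work.save().extend() };
    // ...the scheduler parks `saved` in a lane and gets polled again later...
    let resumed = unsafe { saved.promote(&arena) };

    println!(
        "{} items pending against an arena of {} strings",
        resumed.pending.len(),
        resumed.arena.data.len()
    );
}
```

extend and promote carry exactly this obligation in the real code, which is why both are unsafe fn.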
+ 1 - 5
packages/core/src/mutations.rs

@@ -13,16 +13,12 @@ pub struct Mutations<'a> {
 use DomEdit::*;
 
 impl<'a> Mutations<'a> {
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         let edits = Vec::new();
         let noderefs = Vec::new();
         Self { edits, noderefs }
     }
 
-    pub fn extend(&mut self, other: &mut Mutations) {
-        // self.edits.extend(other.edits);
-    }
-
     // Navigation
     pub(crate) fn push_root(&mut self, root: ElementId) {
         let id = root.as_u64();

+ 244 - 260
packages/core/src/scheduler.rs

@@ -129,28 +129,7 @@ pub enum SchedulerMsg {
 ///
 ///
 pub struct Scheduler {
-    /*
-    This *has* to be an UnsafeCell.
-
-    Each BumpFrame and Scope is located in this Slab - and we'll need mutable access to a scope while holding on to
-    its bumpframe conents immutably.
-
-    However, all of the interaction with this Slab is done in this module and the Diff module, so it should be fairly
-    simple to audit.
-
-    Wrapped in Rc so the "get_shared_context" closure can walk the tree (immutably!)
-    */
-    pub components: Rc<UnsafeCell<Slab<Scope>>>,
-
-    /*
-    Yes, a slab of "nil". We use this for properly ordering ElementIDs - all we care about is the allocation strategy
-    that slab uses. The slab essentially just provides keys for ElementIDs that we can re-use in a Vec on the client.
-
-    This just happened to be the simplest and most efficient way to implement a deterministic keyed map with slot reuse.
-
-    In the future, we could actually store a pointer to the VNode instead of nil to provide O(1) lookup for VNodes...
-    */
-    pub raw_elements: Slab<()>,
+    pub pool: ResourcePool,
 
     pub heuristics: HeuristicsEngine,
 
@@ -175,7 +154,7 @@ pub struct Scheduler {
 
     pub garbage_scopes: HashSet<ScopeId>,
 
-    pub lanes: [PriortySystem; 4],
+    pub lanes: [PriorityLane; 4],
 }
 
 impl Scheduler {
@@ -185,7 +164,12 @@ impl Scheduler {
         Perhaps this should be configurable?
         */
         let components = Rc::new(UnsafeCell::new(Slab::with_capacity(100)));
-        let raw_elements = Slab::with_capacity(2000);
+        let raw_elements = Rc::new(UnsafeCell::new(Slab::with_capacity(2000)));
+
+        let pool = ResourcePool {
+            components: components.clone(),
+            raw_elements,
+        };
 
         let heuristics = HeuristicsEngine::new();
 
@@ -231,16 +215,15 @@ impl Scheduler {
         };
 
         Self {
+            pool,
             channel,
             receiver,
 
-            components,
             async_tasks: FuturesUnordered::new(),
 
             pending_garbage: FxHashSet::default(),
 
             heuristics,
-            raw_elements,
 
             // a storage for our receiver to dump into
             ui_events: VecDeque::new(),
@@ -255,125 +238,14 @@ impl Scheduler {
 
             // a dedicated fiber for each priority
             lanes: [
-                PriortySystem::new(),
-                PriortySystem::new(),
-                PriortySystem::new(),
-                PriortySystem::new(),
+                PriorityLane::new(),
+                PriorityLane::new(),
+                PriorityLane::new(),
+                PriorityLane::new(),
             ],
         }
     }
 
-    /// this is unsafe because the caller needs to track which other scopes it's already using
-    pub fn get_scope(&self, idx: ScopeId) -> Option<&Scope> {
-        let inner = unsafe { &*self.components.get() };
-        inner.get(idx.0)
-    }
-
-    /// this is unsafe because the caller needs to track which other scopes it's already using
-    pub fn get_scope_mut(&self, idx: ScopeId) -> Option<&mut Scope> {
-        let inner = unsafe { &mut *self.components.get() };
-        inner.get_mut(idx.0)
-    }
-
-    pub fn with_scope<'b, O: 'static>(
-        &'b self,
-        _id: ScopeId,
-        _f: impl FnOnce(&'b mut Scope) -> O,
-    ) -> Result<O> {
-        todo!()
-    }
-
-    // return a bumpframe with a lifetime attached to the arena borrow
-    // this is useful for merging lifetimes
-    pub fn with_scope_vnode<'b>(
-        &self,
-        _id: ScopeId,
-        _f: impl FnOnce(&mut Scope) -> &VNode<'b>,
-    ) -> Result<&VNode<'b>> {
-        todo!()
-    }
-
-    pub fn try_remove(&self, id: ScopeId) -> Result<Scope> {
-        let inner = unsafe { &mut *self.components.get() };
-        Ok(inner.remove(id.0))
-        // .try_remove(id.0)
-        // .ok_or_else(|| Error::FatalInternal("Scope not found"))
-    }
-
-    pub fn reserve_node(&self) -> ElementId {
-        todo!("reserving wip until it's fast enough again")
-        // ElementId(self.raw_elements.insert(()))
-    }
-
-    /// return the id, freeing the space of the original node
-    pub fn collect_garbage(&self, id: ElementId) {
-        todo!("garabge collection currently WIP")
-        // self.raw_elements.remove(id.0);
-    }
-
-    pub fn insert_scope_with_key(&self, f: impl FnOnce(ScopeId) -> Scope) -> ScopeId {
-        let g = unsafe { &mut *self.components.get() };
-        let entry = g.vacant_entry();
-        let id = ScopeId(entry.key());
-        entry.insert(f(id));
-        id
-    }
-
-    pub fn clean_up_garbage(&mut self) {
-        // let mut scopes_to_kill = Vec::new();
-        // let mut garbage_list = Vec::new();
-
-        todo!("garbage collection is currently immediate")
-        // for scope in self.garbage_scopes.drain() {
-        //     let scope = self.get_scope_mut(scope).unwrap();
-        //     for node in scope.consume_garbage() {
-        //         garbage_list.push(node);
-        //     }
-
-        //     while let Some(node) = garbage_list.pop() {
-        //         match &node {
-        //             VNode::Text(_) => {
-        //                 self.collect_garbage(node.direct_id());
-        //             }
-        //             VNode::Anchor(_) => {
-        //                 self.collect_garbage(node.direct_id());
-        //             }
-        //             VNode::Suspended(_) => {
-        //                 self.collect_garbage(node.direct_id());
-        //             }
-
-        //             VNode::Element(el) => {
-        //                 self.collect_garbage(node.direct_id());
-        //                 for child in el.children {
-        //                     garbage_list.push(child);
-        //                 }
-        //             }
-
-        //             VNode::Fragment(frag) => {
-        //                 for child in frag.children {
-        //                     garbage_list.push(child);
-        //                 }
-        //             }
-
-        //             VNode::Component(comp) => {
-        //                 // TODO: run the hook destructors and then even delete the scope
-
-        //                 let scope_id = comp.ass_scope.get().unwrap();
-        //                 let scope = self.get_scope(scope_id).unwrap();
-        //                 let root = scope.root();
-        //                 garbage_list.push(root);
-        //                 scopes_to_kill.push(scope_id);
-        //             }
-        //         }
-        //     }
-        // }
-
-        // for scope in scopes_to_kill.drain(..) {
-        //     //
-        //     // kill em
-        // }
-    }
-
     pub fn manually_poll_events(&mut self) {
         while let Ok(Some(msg)) = self.receiver.try_next() {
             self.handle_channel_msg(msg);
@@ -449,6 +321,43 @@ impl Scheduler {
         // unsafe { std::mem::transmute(fib) }
     }
 
+    fn shift_priorities(&mut self) {
+        self.current_priority = match (
+            self.lanes[0].has_work(),
+            self.lanes[1].has_work(),
+            self.lanes[2].has_work(),
+            self.lanes[3].has_work(),
+        ) {
+            (true, _, _, _) => EventPriority::Immediate,
+            (false, true, _, _) => EventPriority::High,
+            (false, false, true, _) => EventPriority::Medium,
+            (false, false, false, _) => EventPriority::Low,
+        };
+    }
+
+    fn load_current_lane(&mut self) -> &mut PriorityLane {
+        match self.current_priority {
+            EventPriority::Immediate => todo!(),
+            EventPriority::High => todo!(),
+            EventPriority::Medium => todo!(),
+            EventPriority::Low => todo!(),
+        }
+    }
+
+    fn save_work(&mut self, lane: SavedDiffWork) {
+        let saved: SavedDiffWork<'static> = unsafe { std::mem::transmute(lane) };
+        self.load_current_lane().saved_state = Some(saved);
+    }
+
+    fn load_work(&mut self) -> SavedDiffWork<'static> {
+        match self.current_priority {
+            EventPriority::Immediate => todo!(),
+            EventPriority::High => todo!(),
+            EventPriority::Medium => todo!(),
+            EventPriority::Low => todo!(),
+        }
+    }
+
     /// The primary workhorse of the VirtualDOM.
     ///
     /// Uses some fairly complex logic to schedule what work should be produced.
@@ -493,7 +402,7 @@ impl Scheduler {
 
             // Wait for any new events if we have nothing to do
             if !self.has_any_work() {
-                self.clean_up_garbage();
+                self.pool.clean_up_garbage();
                 let deadline_expired = self.wait_for_any_trigger(&mut deadline).await;
 
                 if deadline_expired {
@@ -506,133 +415,60 @@ impl Scheduler {
 
             // Work through the current subtree, and commit the results when it finishes
             // When the deadline expires, give back the work
-            self.current_priority = match (
-                self.lanes[0].has_work(),
-                self.lanes[1].has_work(),
-                self.lanes[2].has_work(),
-                self.lanes[3].has_work(),
-            ) {
-                (true, _, _, _) => EventPriority::Immediate,
-                (false, true, _, _) => EventPriority::High,
-                (false, false, true, _) => EventPriority::Medium,
-                (false, false, false, _) => EventPriority::Low,
-            };
+            self.shift_priorities();
 
-            let current_lane = match self.current_priority {
-                EventPriority::Immediate => &mut self.lanes[0],
-                EventPriority::High => &mut self.lanes[1],
-                EventPriority::Medium => &mut self.lanes[2],
-                EventPriority::Low => &mut self.lanes[3],
-            };
+            let saved_state = self.load_work();
 
-            if self.current_priority == EventPriority::Immediate {
-                // IDGAF - get this out the door right now. loop poll if we need to
+            // We have to split off some parts of ourselves; the current lane is borrowed mutably
+            let mut shared = self.pool.clone();
+            let mut machine = unsafe { saved_state.promote(&mut shared) };
+
+            if machine.stack.is_empty() {
+                let shared = self.pool.clone();
+                self.current_lane().dirty_scopes.sort_by(|a, b| {
+                    let h1 = shared.get_scope(*a).unwrap().height;
+                    let h2 = shared.get_scope(*b).unwrap().height;
+                    h1.cmp(&h2)
+                });
+
+                if let Some(scope) = self.current_lane().dirty_scopes.pop() {
+                    let component = self.pool.get_scope(scope).unwrap();
+                    let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
+                    machine.stack.push(DiffInstruction::DiffNode { new, old });
+                }
             }
 
-            use futures_util::future::{select, Either};
+            let completed = {
+                let fut = machine.work();
+                pin_mut!(fut);
+                use futures_util::future::{select, Either};
+                match select(fut, &mut deadline).await {
+                    Either::Left((work, _other)) => true,
+                    Either::Right((deadline, _other)) => false,
+                }
+            };
 
-            // We have to split away some parts of ourself - current lane is borrowed mutably
+            let machine: DiffMachine<'static> = unsafe { std::mem::transmute(machine) };
+            let mut saved = machine.save();
 
-            let shared = SharedVdom {
-                channel: self.channel.clone(),
-                components: unsafe { &mut *self.components.get() },
-                elements: &mut self.raw_elements,
-            };
+            if completed {
+                for node in saved.seen_scopes.drain() {
+                    self.current_lane().dirty_scopes.remove(&node);
+                }
 
-            let mut state = current_lane.saved_state.take().unwrap();
-            let mut machine = unsafe { state.promote(shared) };
+                let mut new_mutations = Mutations::new();
+                std::mem::swap(&mut new_mutations, &mut saved.mutations);
 
-            if machine.stack.is_empty() {
-                // if let Some(scope) = current_lane.dirty_scopes.pop() {
-                //     let component = self.components.get_mut().get_mut(scope.0).unwrap();
-                //     let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
-                //     machine.stack.push(DiffInstruction::DiffNode { new, old });
-                // } else {
-                // }
-            } else {
+                committed_mutations.push(new_mutations);
             }
+            self.save_work(saved);
 
-            // if let Some(state) = current_lane.saved_state.take() {
-            //     let mut machine = unsafe { state.promote(&self) };
-            //     machine.work().await;
-            // } else {
-            // if let Some(scope) = current_lane.dirty_scopes.pop() {
-            //
-
-            // let work_complete = {
-            //     let fut = machine.diff_scope(scope);
-            //     pin_mut!(fut);
-            //     match select(fut, &mut deadline).await {
-            //         Either::Left((work, _other)) => {
-            //             //
-            //             true
-            //         }
-            //         Either::Right((deadline, _other)) => {
-            //             //
-            //             false
-            //         }
-            //     }
-            // };
-
-            // let mut saved = unsafe { machine.save().extend() };
-
-            // // release the stack borrow of ourself
-            // if work_complete {
-            //     for scope in saved.seen_scopes.drain() {
-            //         current_lane.dirty_scopes.remove(&scope);
-            //     }
-            // } else {
-            // }
-            // }
-            // };
-
-            // let mut new_mutations = Mutations::new();
-            // match self.work_with_deadline(&mut deadline).await {
-            //     Some(mutations) => {
-            //         // safety: the scheduler will never let us mutate
-            //         let extended: Mutations<'static> = unsafe { std::mem::transmute(mutations) };
-            //         committed_mutations.push(extended)
-            //     }
-            //     None => return committed_mutations,
-            // }
+            if !completed {
+                break;
+            }
         }
-        // // check if we need to elevate priority
-
-        // // let mut machine = DiffMachine::new(mutations, ScopeId(0), &self);
-
-        // let dirty_root = {
-        //     let dirty_roots = match self.current_priority {
-        //         EventPriority::High => &self.high_priorty.dirty_scopes,
-        //         EventPriority::Medium => &self.medium_priority.dirty_scopes,
-        //         EventPriority::Low => &self.low_priority.dirty_scopes,
-        //     };
-        //     let mut height = 0;
-        //     let mut dirty_root = {
-        //         let root = dirty_roots.iter().next();
-        //         if root.is_none() {
-        //             return true;
-        //         }
-        //         root.unwrap()
-        //     };
-
-        //     for root in dirty_roots {
-        //         if let Some(scope) = self.get_scope(*root) {
-        //             if scope.height < height {
-        //                 height = scope.height;
-        //                 dirty_root = root;
-        //             }
-        //         }
-        //     }
-        //     dirty_root
-        // };
 
-        // let fut = machine.diff_scope(*dirty_root).fuse();
-        // pin_mut!(fut);
-
-        // match futures_util::future::select(deadline, fut).await {
-        //     futures_util::future::Either::Left((deadline, work_fut)) => true,
-        //     futures_util::future::Either::Right((_, deadline_fut)) => false,
-        // }
+        committed_mutations
     }
 
     // waits for a trigger, canceling early if the deadline is reached
@@ -663,6 +499,15 @@ impl Scheduler {
         }
     }
 
+    pub fn current_lane(&mut self) -> &mut PriorityLane {
+        match self.current_priority {
+            EventPriority::Immediate => &mut self.lanes[0],
+            EventPriority::High => &mut self.lanes[1],
+            EventPriority::Medium => &mut self.lanes[2],
+            EventPriority::Low => &mut self.lanes[3],
+        }
+    }
+
     pub fn handle_channel_msg(&mut self, msg: SchedulerMsg) {
         match msg {
             SchedulerMsg::Immediate(_) => todo!(),
@@ -677,7 +522,7 @@ impl Scheduler {
         }
     }
 
-    pub fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
+    fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
         todo!()
         // match priority {
         //     EventPriority::High => self.high_priorty.dirty_scopes.insert(scope),
@@ -687,13 +532,13 @@ impl Scheduler {
     }
 }
 
-pub struct PriortySystem {
+pub struct PriorityLane {
     pub dirty_scopes: IndexSet<ScopeId>,
     pub saved_state: Option<SavedDiffWork<'static>>,
     pub in_progress: bool,
 }
 
-impl PriortySystem {
+impl PriorityLane {
     pub fn new() -> Self {
         Self {
             saved_state: None,
@@ -800,3 +645,142 @@ pub enum EventPriority {
     /// This is considered "idle" work or "background" work.
     Low = 0,
 }
+
+#[derive(Clone)]
+pub struct ResourcePool {
+    /*
+    This *has* to be an UnsafeCell.
+
+    Each BumpFrame and Scope is located in this Slab - and we'll need mutable access to a scope while holding on to
+    its bumpframe contents immutably.
+
+    However, all of the interaction with this Slab is done in this module and the Diff module, so it should be fairly
+    simple to audit.
+
+    Wrapped in Rc so the "get_shared_context" closure can walk the tree (immutably!)
+    */
+    pub components: Rc<UnsafeCell<Slab<Scope>>>,
+
+    /*
+    Yes, a slab of "nil". We use this for properly ordering ElementIDs - all we care about is the allocation strategy
+    that slab uses. The slab essentially just provides keys for ElementIDs that we can re-use in a Vec on the client.
+
+    This just happened to be the simplest and most efficient way to implement a deterministic keyed map with slot reuse.
+
+    In the future, we could actually store a pointer to the VNode instead of nil to provide O(1) lookup for VNodes...
+    */
+    pub raw_elements: Rc<UnsafeCell<Slab<()>>>,
+}
+
+impl ResourcePool {
+    /// this is unsafe because the caller needs to track which other scopes it's already using
+    pub fn get_scope(&self, idx: ScopeId) -> Option<&Scope> {
+        let inner = unsafe { &*self.components.get() };
+        inner.get(idx.0)
+    }
+
+    /// this is unsafe because the caller needs to track which other scopes it's already using
+    pub fn get_scope_mut(&self, idx: ScopeId) -> Option<&mut Scope> {
+        let inner = unsafe { &mut *self.components.get() };
+        inner.get_mut(idx.0)
+    }
+
+    pub fn with_scope<'b, O: 'static>(
+        &'b self,
+        _id: ScopeId,
+        _f: impl FnOnce(&'b mut Scope) -> O,
+    ) -> Result<O> {
+        todo!()
+    }
+
+    // return a bumpframe with a lifetime attached to the arena borrow
+    // this is useful for merging lifetimes
+    pub fn with_scope_vnode<'b>(
+        &self,
+        _id: ScopeId,
+        _f: impl FnOnce(&mut Scope) -> &VNode<'b>,
+    ) -> Result<&VNode<'b>> {
+        todo!()
+    }
+
+    pub fn try_remove(&self, id: ScopeId) -> Result<Scope> {
+        let inner = unsafe { &mut *self.components.get() };
+        Ok(inner.remove(id.0))
+        // .try_remove(id.0)
+        // .ok_or_else(|| Error::FatalInternal("Scope not found"))
+    }
+
+    pub fn reserve_node(&self) -> ElementId {
+        todo!("reserving wip until it's fast enough again")
+        // ElementId(self.raw_elements.insert(()))
+    }
+
+    /// return the id, freeing the space of the original node
+    pub fn collect_garbage(&self, id: ElementId) {
+        todo!("garbage collection currently WIP")
+        // self.raw_elements.remove(id.0);
+    }
+
+    pub fn insert_scope_with_key(&self, f: impl FnOnce(ScopeId) -> Scope) -> ScopeId {
+        let g = unsafe { &mut *self.components.get() };
+        let entry = g.vacant_entry();
+        let id = ScopeId(entry.key());
+        entry.insert(f(id));
+        id
+    }
+
+    pub fn clean_up_garbage(&mut self) {
+        // let mut scopes_to_kill = Vec::new();
+        // let mut garbage_list = Vec::new();
+
+        todo!("garbage collection is currently immediate")
+        // for scope in self.garbage_scopes.drain() {
+        //     let scope = self.get_scope_mut(scope).unwrap();
+        //     for node in scope.consume_garbage() {
+        //         garbage_list.push(node);
+        //     }
+
+        //     while let Some(node) = garbage_list.pop() {
+        //         match &node {
+        //             VNode::Text(_) => {
+        //                 self.collect_garbage(node.direct_id());
+        //             }
+        //             VNode::Anchor(_) => {
+        //                 self.collect_garbage(node.direct_id());
+        //             }
+        //             VNode::Suspended(_) => {
+        //                 self.collect_garbage(node.direct_id());
+        //             }
+
+        //             VNode::Element(el) => {
+        //                 self.collect_garbage(node.direct_id());
+        //                 for child in el.children {
+        //                     garbage_list.push(child);
+        //                 }
+        //             }
+
+        //             VNode::Fragment(frag) => {
+        //                 for child in frag.children {
+        //                     garbage_list.push(child);
+        //                 }
+        //             }
+
+        //             VNode::Component(comp) => {
+        //                 // TODO: run the hook destructors and then even delete the scope
+
+        //                 let scope_id = comp.ass_scope.get().unwrap();
+        //                 let scope = self.get_scope(scope_id).unwrap();
+        //                 let root = scope.root();
+        //                 garbage_list.push(root);
+        //                 scopes_to_kill.push(scope_id);
+        //             }
+        //         }
+        //     }
+        // }
+
+        // for scope in scopes_to_kill.drain(..) {
+        //     //
+        //     // kill em
+        // }
+    }
+}
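The block comments above explain the two wrappers: Rc because several owners (the scheduler and each promoted DiffMachine) need handles to the same slabs, and UnsafeCell because a scope may be borrowed mutably while one of its bump frames is read immutably, an aliasing pattern RefCell would reject at runtime. A reduced sketch of that shape, assuming the slab crate (the same allocator the real pool uses) and a hypothetical Entry type in place of Scope:

```rust
use std::cell::UnsafeCell;
use std::rc::Rc;

use slab::Slab;

// Hypothetical stand-in for `Scope`.
struct Entry {
    name: &'static str,
}

#[derive(Clone)]
struct Pool {
    // Rc: many handles share one slab. UnsafeCell: callers may hold a &Entry
    // for one key while mutating another; the aliasing discipline is enforced
    // by audit, not by the type system.
    entries: Rc<UnsafeCell<Slab<Entry>>>,
}

impl Pool {
    fn new() -> Self {
        Self { entries: Rc::new(UnsafeCell::new(Slab::with_capacity(100))) }
    }

    // Mirrors insert_scope_with_key: reserve a key, build the value from it.
    fn insert_with_key(&self, f: impl FnOnce(usize) -> Entry) -> usize {
        let slab = unsafe { &mut *self.entries.get() };
        let entry = slab.vacant_entry();
        let key = entry.key();
        entry.insert(f(key));
        key
    }

    /// Unsafe in spirit: the caller must not also hold a &mut to this key.
    fn get(&self, key: usize) -> Option<&Entry> {
        unsafe { (*self.entries.get()).get(key) }
    }
}

fn main() {
    let pool = Pool::new();
    let shared = pool.clone(); // cheap handle onto the same underlying slab
    let key = pool.insert_with_key(|k| Entry {
        name: if k == 0 { "root" } else { "child" },
    });
    println!("{}", shared.get(key).unwrap().name);
}
```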

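Stepping back to the work loop earlier in this file: each pass picks the highest lane that has work (shift_priorities), resumes that lane's saved diff machine, races it against the frame deadline with select, and then either commits the finished mutations or parks the machine back in its lane via save_work. Below is a self-contained sketch of just that lane bookkeeping, with a step counter standing in for the diff machine and a numeric budget standing in for the deadline future; all names and costs here are illustrative.

```rust
// Standalone sketch of the scheduler's lane bookkeeping: four priority lanes,
// work always drains the highest non-empty lane, and unfinished work is parked
// back in its lane for the next pass.
#[derive(Clone, Copy)]
enum Priority {
    Immediate = 3,
    High = 2,
    Medium = 1,
    Low = 0,
}

#[derive(Default)]
struct Lane {
    dirty_scopes: Vec<usize>, // scope ids waiting to be diffed
    saved_steps: Option<u32>, // "parked diff machine": steps still to run
}

impl Lane {
    fn has_work(&self) -> bool {
        !self.dirty_scopes.is_empty() || self.saved_steps.is_some()
    }
}

struct Scheduler {
    lanes: [Lane; 4],
    current: Priority,
}

impl Scheduler {
    // Mirrors shift_priorities: the highest non-empty lane wins. As in the
    // original, the fourth lane's flag is computed but never inspected.
    fn shift_priorities(&mut self) {
        self.current = match (
            self.lanes[0].has_work(),
            self.lanes[1].has_work(),
            self.lanes[2].has_work(),
            self.lanes[3].has_work(),
        ) {
            (true, _, _, _) => Priority::Immediate,
            (false, true, _, _) => Priority::High,
            (false, false, true, _) => Priority::Medium,
            (false, false, false, _) => Priority::Low,
        };
    }

    fn current_lane(&mut self) -> &mut Lane {
        match self.current {
            Priority::Immediate => &mut self.lanes[0],
            Priority::High => &mut self.lanes[1],
            Priority::Medium => &mut self.lanes[2],
            Priority::Low => &mut self.lanes[3],
        }
    }

    /// One pass of the loop: resume or start work in the current lane, run it
    /// until the budget is spent, then either report completion or park it.
    fn work_with_budget(&mut self, mut budget: u32) -> bool {
        self.shift_priorities();
        let lane = self.current_lane();
        let mut steps = lane
            .saved_steps
            .take()
            .or_else(|| lane.dirty_scopes.pop().map(|_| 5)) // pretend a scope costs 5 steps
            .unwrap_or(0);

        while steps > 0 && budget > 0 {
            steps -= 1;
            budget -= 1;
        }

        if steps > 0 {
            lane.saved_steps = Some(steps); // deadline hit: park the remaining work
            false
        } else {
            true // completed: this is where mutations would be committed
        }
    }
}

fn main() {
    let mut sched = Scheduler {
        lanes: Default::default(),
        current: Priority::Low,
    };
    sched.lanes[1].dirty_scopes.push(7); // a High-priority scope was dirtied

    assert!(!sched.work_with_budget(3)); // budget too small: work gets parked
    assert!(sched.work_with_budget(10)); // next frame: the parked work finishes
    println!("high-priority scope flushed across two frames");
}
```

The real loop is the asynchronous version of the same split: select(machine.work(), deadline) decides between the commit branch and the save_work branch.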
+ 5 - 4
packages/core/src/virtual_dom.rs

@@ -107,7 +107,7 @@ impl VirtualDom {
 
         let props_ptr = _root_props.as_ref().downcast_ref::<P>().unwrap() as *const P;
 
-        let base_scope = scheduler.insert_scope_with_key(|myidx| {
+        let base_scope = scheduler.pool.insert_scope_with_key(|myidx| {
             let caller = NodeFactory::create_component_caller(root, props_ptr as *const _);
             let name = type_name_of(root);
             Scope::new(
@@ -129,11 +129,11 @@ impl VirtualDom {
     }
 
     pub fn base_scope(&self) -> &Scope {
-        self.scheduler.get_scope(self.base_scope).unwrap()
+        self.scheduler.pool.get_scope(self.base_scope).unwrap()
     }
 
     pub fn get_scope(&self, id: ScopeId) -> Option<&Scope> {
-        self.scheduler.get_scope(id)
+        self.scheduler.pool.get_scope(id)
     }
 
 /// Performs a *full* rebuild of the virtual dom, returning every edit required to generate the actual dom from scratch
@@ -194,10 +194,11 @@ impl VirtualDom {
     }
 
     pub async fn diff_async<'s>(&'s mut self) -> Mutations<'s> {
-        let mut diff_machine = DiffMachine::new(Mutations::new(), todo!());
+        let mut diff_machine = DiffMachine::new(Mutations::new(), todo!(), todo!());
 
         let cur_component = self
             .scheduler
+            .pool
             .get_scope_mut(self.base_scope)
             .expect("The base scope should never be moved");