Explorar o código

chore: cleanup src

Jonathan Kelley hai 3 anos
pai
achega
f2e343c

+ 0 - 1
packages/core/src/component.rs

@@ -138,6 +138,5 @@ impl EmptyBuilder {
 /// This utility function launches the builder method so rsx! and html! macros can use the typed-builder pattern
 /// to initialize a component's props.
 pub fn fc_to_builder<'a, T: Properties + 'a>(_: fn(Context<'a>, &'a T) -> Element) -> T::Builder {
-    // pub fn fc_to_builder<'a, T: Properties + 'a>(_: fn(Scope<'a, T>) -> Element) -> T::Builder {
     T::builder()
 }

+ 8 - 34
packages/core/src/diff.rs

@@ -124,32 +124,6 @@ impl<'bump> DiffState<'bump> {
             force_diff: false,
         }
     }
-
-    pub fn try_remove(&self, id: &ScopeId) -> Option<Scope> {
-        todo!()
-    }
-
-    pub fn reserve_node(&self, node: &VNode) -> ElementId {
-        todo!()
-        // let mut els = self.nodes.borrow_mut();
-        // let entry = els.vacant_entry();
-        // let key = entry.key();
-        // let id = ElementId(key);
-        // let node = node as *const _;
-        // let node = unsafe { std::mem::transmute(node) };
-        // entry.insert(node);
-        // id
-
-        // let nodes = self.nodes.borrow_mut();
-        // let id = nodes.insert(());
-        // let node_id = ElementId(id);
-        // node = Some(node_id);
-        // node_id
-    }
-
-    pub fn collect_garbage(&self, id: ElementId) {
-        todo!()
-    }
 }
 
 impl<'bump> DiffState<'bump> {
@@ -283,14 +257,14 @@ impl<'bump> DiffState<'bump> {
     }
 
     fn create_text_node(&mut self, vtext: &'bump VText<'bump>, node: &'bump VNode<'bump>) {
-        let real_id = self.reserve_node(node);
+        let real_id = self.scopes.reserve_node(node);
         self.mutations.create_text_node(vtext.text, real_id);
         vtext.dom_id.set(Some(real_id));
         self.stack.add_child_count(1);
     }
 
     fn create_suspended_node(&mut self, suspended: &'bump VSuspended, node: &'bump VNode<'bump>) {
-        let real_id = self.reserve_node(node);
+        let real_id = self.scopes.reserve_node(node);
         self.mutations.create_placeholder(real_id);
 
         suspended.dom_id.set(Some(real_id));
@@ -300,7 +274,7 @@ impl<'bump> DiffState<'bump> {
     }
 
     fn create_anchor_node(&mut self, anchor: &'bump VAnchor, node: &'bump VNode<'bump>) {
-        let real_id = self.reserve_node(node);
+        let real_id = self.scopes.reserve_node(node);
         self.mutations.create_placeholder(real_id);
         anchor.dom_id.set(Some(real_id));
         self.stack.add_child_count(1);
@@ -317,7 +291,7 @@ impl<'bump> DiffState<'bump> {
             ..
         } = element;
 
-        let real_id = self.reserve_node(node);
+        let real_id = self.scopes.reserve_node(node);
 
         dom_id.set(Some(real_id));
 
@@ -1142,7 +1116,7 @@ impl<'bump> DiffState<'bump> {
             match node {
                 VNode::Text(t) => {
                     let id = t.dom_id.get().unwrap();
-                    self.collect_garbage(id);
+                    self.scopes.collect_garbage(id);
 
                     if gen_muts {
                         self.mutations.remove(id.as_u64());
@@ -1150,7 +1124,7 @@ impl<'bump> DiffState<'bump> {
                 }
                 VNode::Suspended(s) => {
                     let id = s.dom_id.get().unwrap();
-                    self.collect_garbage(id);
+                    self.scopes.collect_garbage(id);
 
                     if gen_muts {
                         self.mutations.remove(id.as_u64());
@@ -1158,7 +1132,7 @@ impl<'bump> DiffState<'bump> {
                 }
                 VNode::Anchor(a) => {
                     let id = a.dom_id.get().unwrap();
-                    self.collect_garbage(id);
+                    self.scopes.collect_garbage(id);
 
                     if gen_muts {
                         self.mutations.remove(id.as_u64());
@@ -1189,7 +1163,7 @@ impl<'bump> DiffState<'bump> {
                     self.remove_nodes(Some(root), gen_muts);
 
                     log::debug!("Destroying scope {:?}", scope_id);
-                    let mut s = self.try_remove(&scope_id).unwrap();
+                    let mut s = self.scopes.try_remove(&scope_id).unwrap();
                     s.hooks.clear_hooks();
                 }
             }

+ 0 - 2
packages/core/src/lib.rs

@@ -22,7 +22,6 @@ pub(crate) mod nodes;
 pub(crate) mod scope;
 pub(crate) mod scopearena;
 pub(crate) mod test_dom;
-pub(crate) mod util;
 pub(crate) mod virtual_dom;
 
 pub(crate) mod innerlude {
@@ -36,7 +35,6 @@ pub(crate) mod innerlude {
     pub use crate::scope::*;
     pub use crate::scopearena::*;
     pub use crate::test_dom::*;
-    pub use crate::util::*;
     pub use crate::virtual_dom::*;
 
     pub type Element = Option<NodeLink>;

+ 23 - 1
packages/core/src/nodes.rs

@@ -4,7 +4,7 @@
 //! cheap and *very* fast to construct - building a full tree should be quick.
 
 use crate::{
-    innerlude::{empty_cell, Context, Element, ElementId, Properties, Scope, ScopeId},
+    innerlude::{Context, Element, Properties, Scope, ScopeId},
     lazynodes::LazyNodes,
 };
 use bumpalo::{boxed::Box as BumpBox, Bump};
@@ -232,6 +232,28 @@ impl Debug for VNode<'_> {
     }
 }
 
+/// An Element's unique identifier.
+///
+/// `ElementId` is a `usize` that is unique across the entire VirtualDOM - but not unique across time. If a component is
+/// unmounted, then the `ElementId` will be reused for a new component.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct ElementId(pub usize);
+impl std::fmt::Display for ElementId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl ElementId {
+    pub fn as_u64(self) -> u64 {
+        self.0 as u64
+    }
+}
+
+fn empty_cell() -> Cell<Option<ElementId>> {
+    Cell::new(None)
+}
+
 /// A placeholder node only generated when Fragments don't have any children.
 pub struct VAnchor {
     pub dom_id: Cell<Option<ElementId>>,

+ 0 - 106
packages/core/src/old/bumpframe.rs

@@ -1,106 +0,0 @@
-use crate::innerlude::*;
-use bumpalo::Bump;
-use std::cell::Cell;
-
-pub(crate) struct ActiveFrame {
-    pub cur_generation: Cell<usize>,
-
-    // The double-buffering situation that we will use
-    pub frames: [BumpFrame; 2],
-}
-
-impl ActiveFrame {
-    pub fn new() -> Self {
-        let b1 = Bump::new();
-        let b2 = Bump::new();
-
-        let frame_a = BumpFrame {
-            bump: b1,
-            generation: 0.into(),
-        };
-        let frame_b = BumpFrame {
-            bump: b2,
-            generation: 0.into(),
-        };
-
-        Self {
-            frames: [frame_a, frame_b],
-            cur_generation: 0.into(),
-        }
-    }
-
-    pub unsafe fn reset_wip_frame(&self) {
-        // todo: unsafecell or something
-        let bump = self.wip_frame() as *const _ as *mut BumpFrame;
-        let g = &mut *bump;
-        g.bump.reset();
-
-        // self.wip_frame_mut().bump.reset()
-    }
-
-    /// The "work in progress frame" represents the frame that is currently being worked on.
-    pub fn wip_frame(&self) -> &BumpFrame {
-        match self.cur_generation.get() & 1 == 0 {
-            true => &self.frames[0],
-            false => &self.frames[1],
-        }
-    }
-
-    pub fn wip_frame_mut(&mut self) -> &mut BumpFrame {
-        match self.cur_generation.get() & 1 == 0 {
-            true => &mut self.frames[0],
-            false => &mut self.frames[1],
-        }
-    }
-
-    /// The finished frame represents the frame that has been "finished" and cannot be modified again
-    pub fn finished_frame(&self) -> &BumpFrame {
-        match self.cur_generation.get() & 1 == 1 {
-            true => &self.frames[0],
-            false => &self.frames[1],
-        }
-    }
-
-    // /// Give out our self-referential item with our own borrowed lifetime
-    // pub fn fin_head<'b>(&'b self) -> &'b VNode<'b> {
-    //     let cur_head = &self.finished_frame().head_node;
-    //     unsafe { std::mem::transmute::<&VNode<'static>, &VNode<'b>>(cur_head) }
-    // }
-
-    // /// Give out our self-referential item with our own borrowed lifetime
-    // pub fn wip_head<'b>(&'b self) -> &'b VNode<'b> {
-    //     let cur_head = &self.wip_frame().head_node;
-    //     unsafe { std::mem::transmute::<&VNode<'static>, &VNode<'b>>(cur_head) }
-    // }
-
-    pub fn cycle_frame(&mut self) {
-        self.cur_generation.set(self.cur_generation.get() + 1);
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    //! These tests are bad. I don't have a good way of properly testing the ActiveFrame stuff
-    use super::*;
-
-    #[test]
-    fn test_bump_frame() {
-        let mut frames = ActiveFrame::new();
-
-        // just cycle a few times and make sure we get the right frames out
-        for _ in 0..5 {
-            let fin = frames.finished_frame();
-            let wip = frames.wip_frame();
-            assert_eq!(wip._name, "wip");
-            assert_eq!(fin._name, "fin");
-            frames.cycle_frame();
-
-            let fin = frames.finished_frame();
-            let wip = frames.wip_frame();
-            assert_eq!(wip._name, "fin");
-            assert_eq!(fin._name, "wip");
-            frames.cycle_frame();
-        }
-        assert_eq!(frames.cur_generation.get(), 10);
-    }
-}

+ 0 - 93
packages/core/src/old/childiter.rs

@@ -1,93 +0,0 @@
-use crate::innerlude::*;
-
-/// This iterator iterates through a list of virtual children and only returns real children (Elements, Text, Anchors).
-///
-/// This iterator is useful when it's important to load the next real root onto the top of the stack for operations like
-/// "InsertBefore".
-pub(crate) struct RealChildIterator<'a> {
-    scopes: &'a ResourcePool,
-
-    // Heuristically we should never bleed into 4 completely nested fragments/components
-    // Smallvec lets us stack allocate our little stack machine so the vast majority of cases are sane
-    // TODO: use const generics instead of the 4 estimation
-    stack: smallvec::SmallVec<[(u16, &'a VNode<'a>); 4]>,
-}
-
-impl<'a> RealChildIterator<'a> {
-    pub fn new(starter: &'a VNode<'a>, scopes: &'a ResourcePool) -> Self {
-        Self {
-            scopes,
-            stack: smallvec::smallvec![(0, starter)],
-        }
-    }
-}
-
-impl<'a> Iterator for RealChildIterator<'a> {
-    type Item = &'a VNode<'a>;
-
-    fn next(&mut self) -> Option<&'a VNode<'a>> {
-        let mut should_pop = false;
-        let mut returned_node: Option<&'a VNode<'a>> = None;
-        let mut should_push = None;
-
-        while returned_node.is_none() {
-            if let Some((count, node)) = self.stack.last_mut() {
-                match &node {
-                    // We can only exit our looping when we get "real" nodes
-                    // This includes fragments and components when they're empty (have a single root)
-                    VNode::Element(_) | VNode::Text(_) | VNode::Suspended(_) | VNode::Anchor(_) => {
-                        // We've recursed INTO an element/text
-                        // We need to recurse *out* of it and move forward to the next
-                        should_pop = true;
-                        returned_node = Some(node);
-                    }
-
-                    // If we get a fragment we push the next child
-                    VNode::Fragment(frag) => {
-                        let subcount = *count as usize;
-
-                        if frag.children.is_empty() {
-                            should_pop = true;
-                            returned_node = Some(node);
-                        }
-
-                        if subcount >= frag.children.len() {
-                            should_pop = true;
-                        } else {
-                            should_push = Some(&frag.children[subcount]);
-                        }
-                    }
-
-                    // For components, we load their root and push them onto the stack
-                    VNode::Component(sc) => {
-                        let scope = self
-                            .scopes
-                            .get_scope(&sc.associated_scope.get().unwrap())
-                            .unwrap();
-
-                        // Simply swap the current node on the stack with the root of the component
-                        *node = scope.frames.fin_head();
-                    }
-                }
-            } else {
-                // If there's no more items on the stack, we're done!
-                return None;
-            }
-
-            if should_pop {
-                self.stack.pop();
-                if let Some((id, _)) = self.stack.last_mut() {
-                    *id += 1;
-                }
-                should_pop = false;
-            }
-
-            if let Some(push) = should_push {
-                self.stack.push((0, push));
-                should_push = None;
-            }
-        }
-
-        returned_node
-    }
-}

+ 0 - 131
packages/core/src/old/debug_dom.rs

@@ -1,131 +0,0 @@
-use crate::{innerlude::ScopeState, virtual_dom::VirtualDom, VNode};
-
-impl std::fmt::Display for VirtualDom {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let base = self.base_scope();
-        let root = base.root_node();
-
-        let renderer = ScopeRenderer {
-            show_fragments: false,
-            skip_components: false,
-
-            _scope: base,
-            _pre_render: false,
-            _newline: true,
-            _indent: true,
-            _max_depth: usize::MAX,
-        };
-
-        renderer.render(self, root, f, 0)
-    }
-}
-
-/// render the scope to a string using the rsx! syntax
-pub(crate) struct ScopeRenderer<'a> {
-    pub skip_components: bool,
-    pub show_fragments: bool,
-    pub _scope: &'a ScopeState,
-    pub _pre_render: bool,
-    pub _newline: bool,
-    pub _indent: bool,
-    pub _max_depth: usize,
-}
-
-// this is more or less a debug tool, but it'll render the entire tree to the terminal
-impl<'a> ScopeRenderer<'a> {
-    pub fn render(
-        &self,
-        vdom: &VirtualDom,
-        node: &VNode,
-        f: &mut std::fmt::Formatter,
-        il: u16,
-    ) -> std::fmt::Result {
-        const INDENT: &str = "    ";
-        let write_indent = |_f: &mut std::fmt::Formatter, le| {
-            for _ in 0..le {
-                write!(_f, "{}", INDENT).unwrap();
-            }
-        };
-
-        match &node {
-            VNode::Linked(_) => {
-                write!(f, "Linked").unwrap();
-            }
-            VNode::Text(text) => {
-                write_indent(f, il);
-                writeln!(f, "\"{}\"", text.text)?
-            }
-            VNode::Anchor(_anchor) => {
-                write_indent(f, il);
-                writeln!(f, "Anchor {{}}")?;
-            }
-            VNode::Element(el) => {
-                write_indent(f, il);
-                writeln!(f, "{} {{", el.tag_name)?;
-                // write!(f, "element: {}", el.tag_name)?;
-                let mut attr_iter = el.attributes.iter().peekable();
-
-                while let Some(attr) = attr_iter.next() {
-                    match attr.namespace {
-                        None => {
-                            //
-                            write_indent(f, il + 1);
-                            writeln!(f, "{}: \"{}\"", attr.name, attr.value)?
-                        }
-
-                        Some(ns) => {
-                            // write the opening tag
-                            write_indent(f, il + 1);
-                            write!(f, " {}:\"", ns)?;
-                            let mut cur_ns_el = attr;
-                            'ns_parse: loop {
-                                write!(f, "{}:{};", cur_ns_el.name, cur_ns_el.value)?;
-                                match attr_iter.peek() {
-                                    Some(next_attr) if next_attr.namespace == Some(ns) => {
-                                        cur_ns_el = attr_iter.next().unwrap();
-                                    }
-                                    _ => break 'ns_parse,
-                                }
-                            }
-                            // write the closing tag
-                            write!(f, "\"")?;
-                        }
-                    }
-                }
-
-                for child in el.children {
-                    self.render(vdom, child, f, il + 1)?;
-                }
-                write_indent(f, il);
-
-                writeln!(f, "}}")?;
-            }
-            VNode::Fragment(frag) => {
-                if self.show_fragments {
-                    write_indent(f, il);
-                    writeln!(f, "Fragment {{")?;
-                    for child in frag.children {
-                        self.render(vdom, child, f, il + 1)?;
-                    }
-                    write_indent(f, il);
-                    writeln!(f, "}}")?;
-                } else {
-                    for child in frag.children {
-                        self.render(vdom, child, f, il)?;
-                    }
-                }
-            }
-            VNode::Component(vcomp) => {
-                let idx = vcomp.associated_scope.get().unwrap();
-                if !self.skip_components {
-                    let new_node = vdom.get_scope(&idx).unwrap().root_node();
-                    self.render(vdom, new_node, f, il)?;
-                }
-            }
-            VNode::Suspended { .. } => {
-                // we can't do anything with suspended nodes
-            }
-        }
-        Ok(())
-    }
-}

+ 0 - 15
packages/core/src/old/events.rs

@@ -1,15 +0,0 @@
-//! An event system that's less confusing than Traits + RC;
-//! This should hopefully make it easier to port to other platforms.
-//!
-//! Unfortunately, it is less efficient than the original, but hopefully it's negligible.
-
-use crate::{
-    innerlude::Listener,
-    innerlude::{ElementId, NodeFactory, ScopeId},
-};
-use bumpalo::boxed::Box as BumpBox;
-use std::{
-    any::Any,
-    cell::{Cell, RefCell},
-    fmt::Debug,
-};

+ 0 - 187
packages/core/src/old/hooks.rs

@@ -1,187 +0,0 @@
-//! Built-in hooks
-//!
-//! This module contains all the low-level built-in hooks that require first party support to work.
-//!
-//! Hooks:
-//! - [`use_hook`]
-//! - [`use_state_provider`]
-//! - [`use_state_consumer`]
-//! - [`use_task`]
-//! - [`use_suspense`]
-
-use crate::innerlude::*;
-use futures_util::FutureExt;
-use std::{any::Any, cell::RefCell, future::Future, ops::Deref, rc::Rc};
-
-// /// Awaits the given task, forcing the component to re-render when the value is ready.
-// ///
-// /// Returns the handle to the task and the value (if it is ready, else None).
-// ///
-// /// ```
-// /// static Example: FC<()> = |(cx, props)| {
-// ///     let (task, value) = use_task(|| async {
-// ///         timer::sleep(Duration::from_secs(1)).await;
-// ///         "Hello World"
-// ///     });
-// ///
-// ///     match contents {
-// ///         Some(contents) => rsx!(cx, div { "{title}" }),
-// ///         None => rsx!(cx, div { "Loading..." }),
-// ///     }
-// /// };
-// /// ```
-// pub fn use_coroutine<'src, Out, Fut, Init>(
-//     cx: Context<'src>,
-//     task_initializer: Init,
-// ) -> (&'src TaskHandle, &'src Option<Out>)
-// where
-//     Out: 'static,
-//     Fut: Future<Output = Out> + 'static,
-//     Init: FnOnce() -> Fut + 'src,
-// {
-//     struct TaskHook<T> {
-//         handle: TaskHandle,
-//         task_dump: Rc<RefCell<Option<T>>>,
-//         value: Option<T>,
-//     }
-
-//     todo!()
-
-//     // // whenever the task is complete, save it into th
-//     // cx.use_hook(
-//     //     move |_| {
-//     //         let task_fut = task_initializer();
-
-//     //         let task_dump = Rc::new(RefCell::new(None));
-
-//     //         let slot = task_dump.clone();
-
-//     //         let updater = cx.schedule_update_any();
-//     //         let originator = cx.scope.our_arena_idx;
-
-//     //         let handle = cx.submit_task(Box::pin(task_fut.then(move |output| async move {
-//     //             *slot.as_ref().borrow_mut() = Some(output);
-//     //             updater(originator);
-//     //             originator
-//     //         })));
-
-//     //         TaskHook {
-//     //             task_dump,
-//     //             value: None,
-//     //             handle,
-//     //         }
-//     //     },
-//     //     |hook| {
-//     //         if let Some(val) = hook.task_dump.as_ref().borrow_mut().take() {
-//     //             hook.value = Some(val);
-//     //         }
-//     //         (&hook.handle, &hook.value)
-//     //     },
-//     //     |_| {},
-//     // )
-// }
-
-/// Asynchronously render new nodes once the given future has completed.
-///
-/// # Easda
-///
-///
-///
-///
-/// # Example
-///
-///
-pub fn use_suspense<'src, Out, Fut, Cb>(
-    cx: Context<'src>,
-    task_initializer: impl FnOnce() -> Fut,
-    user_callback: Cb,
-) -> Element
-where
-    Fut: Future<Output = Out>,
-    Out: 'static,
-    Cb: FnMut(&Out) -> Element + 'src,
-{
-    /*
-    General strategy:
-    - Create a slot for the future to dump its output into
-    - Create a new future feeding off the user's future that feeds the output into that slot
-    - Submit that future as a task
-    - Take the task handle id and attach that to our suspended node
-    - when the hook runs, check if the value exists
-    - if it does, then we can render the node directly
-    - if it doesn't, then we render a suspended node along with with the callback and task id
-    */
-    todo!()
-    // cx.use_hook(
-    // move |_| {
-    //     let value = Rc::new(RefCell::new(None));
-    //     let slot = value.clone();
-    //     let originator = cx.scope.our_arena_idx;
-
-    //     let handle = cx.submit_task(Box::pin(task_initializer().then(
-    //         move |output| async move {
-    //             *slot.borrow_mut() = Some(Box::new(output) as Box<dyn Any>);
-    //             originator
-    //         },
-    //     )));
-
-    //     SuspenseHook { handle, value }
-    // },
-    // move |hook| {
-    //     // If the value exists, just run the callback to get the contents
-    //     // if the value doesn't exist, we want to render a suspended node with an associated callback
-    //     if let Some(value) = hook.value.borrow().as_ref() {
-    //         let out = value.downcast_ref::<Out>().unwrap();
-    //         user_callback(out)
-    //     } else {
-    //         let value = hook.value.clone();
-
-    //         let id = hook.handle.our_id;
-
-    //         let bump = cx.bump();
-
-    //         use bumpalo::boxed::Box as BumpBox;
-
-    //         let f: &mut dyn FnMut() -> Element<'src> = bump.alloc(move || {
-    //             let val = value.borrow();
-
-    //             let out = val
-    //                 .as_ref()
-    //                 .unwrap()
-    //                 .as_ref()
-    //                 .downcast_ref::<Out>()
-    //                 .unwrap();
-
-    //             user_callback(out)
-    //         });
-    //         let callback = unsafe { BumpBox::from_raw(f) };
-
-    //         Some(VNode::Suspended(bump.alloc(VSuspended {
-    //             dom_id: empty_cell(),
-    //             task_id: id,
-    //             callback: RefCell::new(Some(callback)),
-    //         })))
-    //     }
-    // },
-    // |_| {},
-    // )
-}
-
-// pub(crate) struct SuspenseHook {
-//     pub handle: TaskHandle,
-//     pub value: Rc<RefCell<Option<Box<dyn Any>>>>,
-// }
-
-#[derive(Clone, Copy)]
-pub struct NodeRef<'src, T: 'static>(&'src RefCell<Option<T>>);
-
-impl<'a, T> Deref for NodeRef<'a, T> {
-    type Target = RefCell<Option<T>>;
-    fn deref(&self) -> &Self::Target {
-        self.0
-    }
-}
-
-pub fn use_node_ref<T, P>(cx: Context) -> NodeRef<T> {
-    cx.use_hook(|_| RefCell::new(None), |f| NodeRef { 0: f })
-}

+ 0 - 119
packages/core/src/old/noderef.rs

@@ -1,119 +0,0 @@
-// let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();
-
-// let mut garbage_list = scope.consume_garbage();
-
-// let mut scopes_to_kill = Vec::new();
-// while let Some(node) = garbage_list.pop() {
-//     match &node.kind {
-//         VNodeKind::Text(_) => {
-//             self.shared.collect_garbage(node.direct_id());
-//         }
-//         VNodeKind::Anchor(_) => {
-//             self.shared.collect_garbage(node.direct_id());
-//         }
-//         VNodeKind::Suspended(_) => {
-//             self.shared.collect_garbage(node.direct_id());
-//         }
-
-//         VNodeKind::Element(el) => {
-//             self.shared.collect_garbage(node.direct_id());
-//             for child in el.children {
-//                 garbage_list.push(child);
-//             }
-//         }
-
-//         VNodeKind::Fragment(frag) => {
-//             for child in frag.children {
-//                 garbage_list.push(child);
-//             }
-//         }
-
-//         VNodeKind::Component(comp) => {
-//             // TODO: run the hook destructors and then even delete the scope
-
-//             let scope_id = comp.ass_scope.get().unwrap();
-//             let scope = self.get_scope(scope_id).unwrap();
-//             let root = scope.root();
-//             garbage_list.push(root);
-//             scopes_to_kill.push(scope_id);
-//         }
-//     }
-// }
-
-// for scope in scopes_to_kill {
-//     // oy kill em
-//     log::debug!("should be removing scope {:#?}", scope);
-// }
-
-// // On the primary event queue, there is no batching, we take them off one-by-one
-// let trigger = match receiver.try_next() {
-//     Ok(Some(trigger)) => trigger,
-//     _ => {
-//         // Continuously poll the future pool and the event receiver for work
-//         let mut tasks = self.shared.async_tasks.borrow_mut();
-//         let tasks_tasks = tasks.next();
-
-//         // if the new event generates work more important than our current fiber, we should consider switching
-//         // only switch if it impacts different scopes.
-//         let mut ui_receiver = self.shared.ui_event_receiver.borrow_mut();
-//         let ui_reciv_task = ui_receiver.next();
-
-//         // right now, this polling method will only catch batched set_states that don't get awaited.
-//         // However, in the future, we might be interested in batching set_states across await points
-//         let immediate_tasks = ();
-
-//         futures_util::pin_mut!(tasks_tasks);
-//         futures_util::pin_mut!(ui_reciv_task);
-
-//         // Poll the event receiver and the future pool for work
-//         // Abort early if our deadline has ran out
-//         let mut deadline = (&mut deadline_future).fuse();
-
-//         let trig = futures_util::select! {
-//             trigger = tasks_tasks => trigger,
-//             trigger = ui_reciv_task => trigger,
-
-//             // abort if we're out of time
-//             _ = deadline => { return Ok(diff_machine.mutations); }
-//         };
-
-//         trig.unwrap()
-//     }
-// };
-
-// async fn select_next_event(&mut self) -> Option<EventTrigger> {
-//     let mut receiver = self.shared.task_receiver.borrow_mut();
-
-//     // drain the in-flight events so that we can sort them out with the current events
-//     while let Ok(Some(trigger)) = receiver.try_next() {
-//         log::info!("retrieving event from receiver");
-//         let key = self.shared.make_trigger_key(&trigger);
-//         self.pending_events.insert(key, trigger);
-//     }
-
-//     if self.pending_events.is_empty() {
-//         // Continuously poll the future pool and the event receiver for work
-//         let mut tasks = self.shared.async_tasks.borrow_mut();
-//         let tasks_tasks = tasks.next();
-
-//         let mut receiver = self.shared.task_receiver.borrow_mut();
-//         let reciv_task = receiver.next();
-
-//         futures_util::pin_mut!(tasks_tasks);
-//         futures_util::pin_mut!(reciv_task);
-
-//         let trigger = match futures_util::future::select(tasks_tasks, reciv_task).await {
-//             futures_util::future::Either::Left((trigger, _)) => trigger,
-//             futures_util::future::Either::Right((trigger, _)) => trigger,
-//         }
-//         .unwrap();
-//         let key = self.shared.make_trigger_key(&trigger);
-//         self.pending_events.insert(key, trigger);
-//     }
-
-//     // pop the most important event off
-//     let key = self.pending_events.keys().next().unwrap().clone();
-//     let trigger = self.pending_events.remove(&key).unwrap();
-
-//     Some(trigger)
-// }

+ 0 - 77
packages/core/src/old/resources.rs

@@ -1,77 +0,0 @@
-use crate::innerlude::*;
-use slab::Slab;
-
-use std::{cell::UnsafeCell, rc::Rc};
-#[derive(Clone)]
-pub(crate) struct ResourcePool {
-    /*
-    This *has* to be an UnsafeCell.
-
-    Each BumpFrame and Scope is located in this Slab - and we'll need mutable access to a scope while holding on to
-    its bumpframe contents immutably.
-
-    However, all of the interaction with this Slab is done in this module and the Diff module, so it should be fairly
-    simple to audit.
-
-    Wrapped in Rc so the "get_shared_context" closure can walk the tree (immutably!)
-    */
-    pub components: Rc<UnsafeCell<Slab<ScopeInner>>>,
-
-    /*
-    Yes, a slab of "nil". We use this for properly ordering ElementIDs - all we care about is the allocation strategy
-    that slab uses. The slab essentially just provides keys for ElementIDs that we can re-use in a Vec on the client.
-
-    This just happened to be the simplest and most efficient way to implement a deterministic keyed map with slot reuse.
-
-    In the future, we could actually store a pointer to the VNode instead of nil to provide O(1) lookup for VNodes...
-    */
-    pub raw_elements: Rc<UnsafeCell<Slab<*const VNode<'static>>>>,
-
-    pub channel: EventChannel,
-}
-
-impl ResourcePool {
-    /// this is unsafe because the caller needs to track which other scopes it's already using
-    pub fn get_scope(&self, idx: &ScopeId) -> Option<&ScopeInner> {
-        let inner = unsafe { &*self.components.get() };
-        inner.get(idx.0)
-    }
-
-    /// this is unsafe because the caller needs to track which other scopes it's already using
-    pub fn get_scope_mut(&self, idx: &ScopeId) -> Option<&mut ScopeInner> {
-        let inner = unsafe { &mut *self.components.get() };
-        inner.get_mut(idx.0)
-    }
-
-    pub fn try_remove(&self, id: &ScopeId) -> Option<ScopeInner> {
-        let inner = unsafe { &mut *self.components.get() };
-        Some(inner.remove(id.0))
-        // .try_remove(id.0)
-        // .ok_or_else(|| Error::FatalInternal("Scope not found"))
-    }
-
-    pub fn reserve_node<'a>(&self, node: &'a VNode<'a>) -> ElementId {
-        let els = unsafe { &mut *self.raw_elements.get() };
-        let entry = els.vacant_entry();
-        let key = entry.key();
-        let id = ElementId(key);
-        let node = node as *const _;
-        let node = unsafe { std::mem::transmute(node) };
-        entry.insert(node);
-        id
-    }
-
-    /// return the id, freeing the space of the original node
-    pub fn collect_garbage(&self, id: ElementId) {
-        let els = unsafe { &mut *self.raw_elements.get() };
-        els.remove(id.0);
-    }
-
-    pub fn insert_scope_with_key(&self, f: impl FnOnce(ScopeId) -> ScopeInner) -> ScopeId {
-        let g = unsafe { &mut *self.components.get() };
-        let entry = g.vacant_entry();
-        let id = ScopeId(entry.key());
-        entry.insert(f(id));
-        id
-    }
-}

+ 0 - 597
packages/core/src/old/scheduler.rs

@@ -1,597 +0,0 @@
-/*
-Welcome to Dioxus's cooperative, priority-based scheduler.
-
-I hope you enjoy your stay.
-
-Some essential reading:
-- https://github.com/facebook/react/blob/main/packages/scheduler/src/forks/Scheduler.js#L197-L200
-- https://github.com/facebook/react/blob/main/packages/scheduler/src/forks/Scheduler.js#L440
-- https://github.com/WICG/is-input-pending
-- https://web.dev/rail/
-- https://indepth.dev/posts/1008/inside-fiber-in-depth-overview-of-the-new-reconciliation-algorithm-in-react
-
-# What's going on?
-
-Dioxus is a framework for "user experience" - not just "user interfaces." Part of the "experience" is keeping the UI
-snappy and "jank free" even under heavy work loads. Dioxus already has the "speed" part figured out - but there's no
-point in being "fast" if you can't also be "responsive."
-
-As such, Dioxus can manually decide on what work is most important at any given moment in time. With a properly tuned
-priority system, Dioxus can ensure that user interaction is prioritized and committed as soon as possible (sub 100ms).
-The controller responsible for this priority management is called the "scheduler" and is responsible for juggling many
-different types of work simultaneously.
-
-# How does it work?
-
-Per the RAIL guide, we want to make sure that A) inputs are handled ASAP and B) animations are not blocked.
-React-three-fiber is a testament to how amazing this can be - a ThreeJS scene is threaded in between work periods of
-React, and the UI still stays snappy!
-
-While it's straightforward to run code ASAP and be as "fast as possible", what's not  _not_ straightforward is how to do
-this while not blocking the main thread. The current prevailing thought is to stop working periodically so the browser
-has time to paint and run animations. When the browser is finished, we can step in and continue our work.
-
-React-Fiber uses the "Fiber" concept to achieve a pause-resume functionality. This is worth reading up on, but not
-necessary to understand what we're doing here. In Dioxus, our DiffMachine is guided by DiffInstructions - essentially
-"commands" that guide the Diffing algorithm through the tree. Our "diff_scope" method is async - we can literally pause
-our DiffMachine "mid-sentence" (so to speak) by just stopping the poll on the future. The DiffMachine periodically yields
-so Rust's async machinery can take over, allowing us to customize when exactly to pause it.
-
-React's "should_yield" method is more complex than ours, and I assume we'll move in that direction as Dioxus matures. For
-now, Dioxus just assumes a TimeoutFuture, and selects! on both the Diff algorithm and timeout. If the DiffMachine finishes
-before the timeout, then Dioxus will work on any pending work in the interim. If there is no pending work, then the changes
-are committed, and coroutines are polled during the idle period. However, if the timeout expires, then the DiffMachine
-future is paused and saved (self-referentially).
-
-# Priority System
-
-So far, we've been able to thread our Dioxus work between animation frames - the main thread is not blocked! But that
-doesn't help us _under load_. How do we still stay snappy... even if we're doing a lot of work? Well, that's where
-priorities come into play. The goal with priorities is to schedule shorter work as a "high" priority and longer work as
-a "lower" priority. That way, we can interrupt long-running low-priority work with short-running high-priority work.
-
-React's priority system is quite complex.
-
-There are 5 levels of priority and 2 distinctions between UI events (discrete, continuous). I believe React really only
-uses 3 priority levels and "idle" priority isn't used... Regardless, there's some batching going on.
-
-For Dioxus, we're going with a 4 tier priority system:
-- Sync: Things that need to be done by the next frame, like TextInput on controlled elements
-- High: for events that block all others - clicks, keyboard, and hovers
-- Medium: for UI events caused by the user but not directly - scrolls/forms/focus (all other events)
-- Low: set_state called asynchronously, and anything generated by suspense
-
-In "Sync" state, we abort our "idle wait" future, and resolve the sync queue immediately and escape. Because we completed
-work before the next rAF, any edits can be immediately processed before the frame ends. Generally though, we want to leave
-as much time to rAF as possible. "Sync" is currently only used by onInput - we'll leave some docs telling people not to
-do anything too arduous from onInput.
-
-For the rest, we defer to the rIC period and work down each queue from high to low.
-*/
-
-use crate::innerlude::*;
-use bumpalo::Bump;
-use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
-use futures_util::{pin_mut, stream::FuturesUnordered, Future, FutureExt, StreamExt};
-use fxhash::FxHashMap;
-use fxhash::FxHashSet;
-use indexmap::IndexSet;
-use slab::Slab;
-use std::pin::Pin;
-use std::task::Poll;
-use std::{
-    any::{Any, TypeId},
-    cell::{Cell, UnsafeCell},
-    collections::{HashSet, VecDeque},
-    rc::Rc,
-};
-
-#[derive(Clone)]
-pub(crate) struct EventChannel {
-    pub sender: UnboundedSender<SchedulerMsg>,
-    pub get_shared_context: GetSharedContext,
-}
-
-pub type GetSharedContext = Rc<dyn Fn(ScopeId, TypeId) -> Option<Rc<dyn Any>>>;
-
-pub enum SchedulerMsg {
-    // events from the host
-    UiEvent(UserEvent),
-
-    // setstate
-    Immediate(ScopeId),
-}
-
-/// The scheduler holds basically everything around "working"
-///
-/// Each scope has the ability to lightly interact with the scheduler (IE, schedule an update) but ultimately the scheduler calls the components.
-///
-/// In Dioxus, the scheduler provides 4 priority levels - each with their own "DiffMachine". The DiffMachine state can be saved if the deadline runs
-/// out.
-///
-/// Saved DiffMachine state can be self-referential, so we need to be careful about how we save it. All self-referential data is a link between
-/// pending DiffInstructions, Mutations, and their underlying Scope. It's okay for us to be self-referential with this data, provided we don't priority
-/// task shift to a higher priority task that needs mutable access to the same scopes.
-///
-/// We can prevent this safety issue from occurring if we track which scopes are invalidated when starting a new task.
-///
-/// There's a lot of raw pointers here...
-///
-/// Since we're building self-referential structures for each component, we need to make sure that the referencs stay stable
-/// The best way to do that is a bump allocator.
-///
-///
-///
-pub(crate) struct Scheduler {
-    // /// All mounted components are arena allocated to make additions, removals, and references easy to work with
-    // /// A generational arena is used to re-use slots of deleted scopes without having to resize the underlying arena.
-    // ///
-    // /// This is wrapped in an UnsafeCell because we will need to get mutable access to unique values in unique bump arenas
-    // /// and rusts's guarantees cannot prove that this is safe. We will need to maintain the safety guarantees manually.
-    // pub pool: ResourcePool,
-    //
-    pub component_arena: Bump,
-
-    pub free_components: VecDeque<*mut ScopeInner>,
-
-    pub heuristics: FxHashMap<FcSlot, Heuristic>,
-
-    pub receiver: UnboundedReceiver<SchedulerMsg>,
-
-    // Garbage stored
-    pub pending_garbage: FxHashSet<ScopeId>,
-
-    // Every component that has futures that need to be polled
-    pub pending_futures: FxHashSet<ScopeId>,
-
-    // // scheduler stuff
-    // pub current_priority: EventPriority,
-    pub ui_events: VecDeque<UserEvent>,
-
-    pub pending_immediates: VecDeque<ScopeId>,
-
-    pub batched_events: VecDeque<UserEvent>,
-
-    pub garbage_scopes: HashSet<ScopeId>,
-
-    pub dirty_scopes: IndexSet<ScopeId>,
-
-    pub saved_state: Option<SavedDiffWork<'static>>,
-
-    pub in_progress: bool,
-}
-
-pub type FcSlot = *const ();
-
-pub struct Heuristic {
-    hook_arena_size: usize,
-    node_arena_size: usize,
-}
-
-impl Scheduler {
-    pub(crate) fn new(
-        sender: UnboundedSender<SchedulerMsg>,
-        receiver: UnboundedReceiver<SchedulerMsg>,
-        component_capacity: usize,
-        element_capacity: usize,
-    ) -> Self {
-        /*
-        Preallocate 2000 elements and 100 scopes to avoid dynamic allocation.
-        Perhaps this should be configurable from some external config?
-        */
-
-        // let components = Rc::new(UnsafeCell::new(Slab::with_capacity(component_capacity)));
-        let raw_elements = Rc::new(UnsafeCell::new(Slab::with_capacity(element_capacity)));
-
-        let channel = EventChannel {
-            sender: sender.clone(),
-            get_shared_context: {
-                todo!()
-                // let components = components.clone();
-                // Rc::new(move |id, ty| {
-                //     let components = unsafe { &*components.get() };
-                //     let mut search: Option<&ScopeInner> = components.get(id.0);
-                //     while let Some(inner) = search.take() {
-                //         if let Some(shared) = inner.shared_contexts.borrow().get(&ty) {
-                //             return Some(shared.clone());
-                //         } else {
-                //             search = inner.parent_idx.map(|id| components.get(id.0)).flatten();
-                //         }
-                //     }
-                //     None
-                // })
-            },
-        };
-
-        // let pool = ResourcePool {
-        //     components,
-        //     raw_elements,
-        //     channel,
-        // };
-
-        let saved_state = SavedDiffWork {
-            mutations: Mutations::new(),
-            stack: DiffStack::new(),
-            seen_scopes: Default::default(),
-        };
-
-        Self {
-            // pool,
-            receiver,
-
-            pending_garbage: FxHashSet::default(),
-
-            ui_events: VecDeque::new(),
-
-            pending_immediates: VecDeque::new(),
-
-            batched_events: VecDeque::new(),
-
-            garbage_scopes: HashSet::new(),
-
-            pending_futures: Default::default(),
-            dirty_scopes: Default::default(),
-            saved_state: Some(saved_state),
-            in_progress: false,
-
-            heuristics: todo!(),
-        }
-    }
-
-    // returns true if the event is discrete
-    pub fn handle_ui_event(&mut self, event: UserEvent) -> bool {
-        let (discrete, priority) = event_meta(&event);
-
-        if let Some(scope) = self.get_scope_mut(&event.scope) {
-            if let Some(element) = event.mounted_dom_id {
-                // TODO: bubble properly here
-                scope.call_listener(event, element);
-
-                while let Ok(Some(dirty_scope)) = self.receiver.try_next() {
-                    //
-                    //     self.add_dirty_scope(dirty_scope, trigger.priority)
-                }
-            }
-        }
-
-        // use EventPriority::*;
-
-        // match priority {
-        //     Immediate => todo!(),
-        //     High => todo!(),
-        //     Medium => todo!(),
-        //     Low => todo!(),
-        // }
-
-        discrete
-    }
-
-    fn prepare_work(&mut self) {
-        // while let Some(trigger) = self.ui_events.pop_back() {
-        //     if let Some(scope) = self.get_scope_mut(&trigger.scope) {}
-        // }
-    }
-
-    // nothing to do, no events on channels, no work
-    pub fn has_any_work(&self) -> bool {
-        !(self.dirty_scopes.is_empty() && self.ui_events.is_empty())
-    }
-
-    /// re-balance the work lanes, ensuring high-priority work properly bumps away low priority work
-    fn balance_lanes(&mut self) {}
-
-    fn save_work(&mut self, lane: SavedDiffWork) {
-        let saved: SavedDiffWork<'static> = unsafe { std::mem::transmute(lane) };
-        self.saved_state = Some(saved);
-    }
-
-    unsafe fn load_work(&mut self) -> SavedDiffWork<'static> {
-        self.saved_state.take().unwrap().extend()
-    }
-
-    pub fn handle_channel_msg(&mut self, msg: SchedulerMsg) {
-        match msg {
-            SchedulerMsg::Immediate(_) => todo!(),
-
-            SchedulerMsg::UiEvent(event) => {
-                //
-
-                let (discrete, priority) = event_meta(&event);
-
-                if let Some(scope) = self.get_scope_mut(&event.scope) {
-                    if let Some(element) = event.mounted_dom_id {
-                        // TODO: bubble properly here
-                        scope.call_listener(event, element);
-
-                        while let Ok(Some(dirty_scope)) = self.receiver.try_next() {
-                            //
-                            //     self.add_dirty_scope(dirty_scope, trigger.priority)
-                        }
-                    }
-                }
-
-                discrete;
-            }
-        }
-    }
-
-    /// Load the current lane, and work on it, periodically checking in if the deadline has been reached.
-    ///
-    /// Returns true if the lane is finished before the deadline could be met.
-    pub fn work_on_current_lane(
-        &mut self,
-        deadline_reached: impl FnMut() -> bool,
-        mutations: &mut Vec<Mutations>,
-    ) -> bool {
-        // Work through the current subtree, and commit the results when it finishes
-        // When the deadline expires, give back the work
-        let saved_state = unsafe { self.load_work() };
-
-        // We have to split away some parts of ourself - current lane is borrowed mutably
-        let shared = self.clone();
-        let mut machine = unsafe { saved_state.promote(&shared) };
-
-        let mut ran_scopes = FxHashSet::default();
-
-        if machine.stack.is_empty() {
-            let shared = self.clone();
-
-            self.dirty_scopes
-                .retain(|id| shared.get_scope(id).is_some());
-            self.dirty_scopes.sort_by(|a, b| {
-                let h1 = shared.get_scope(a).unwrap().height;
-                let h2 = shared.get_scope(b).unwrap().height;
-                h1.cmp(&h2).reverse()
-            });
-
-            if let Some(scopeid) = self.dirty_scopes.pop() {
-                log::info!("handling dirty scope {:?}", scopeid);
-                if !ran_scopes.contains(&scopeid) {
-                    ran_scopes.insert(scopeid);
-                    log::debug!("about to run scope {:?}", scopeid);
-
-                    if let Some(component) = self.get_scope_mut(&scopeid) {
-                        if component.run_scope(&self) {
-                            let (old, new) =
-                                (component.frames.wip_head(), component.frames.fin_head());
-                            // let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
-                            machine.stack.scope_stack.push(scopeid);
-                            machine.stack.push(DiffInstruction::Diff { new, old });
-                        }
-                    }
-                }
-            }
-        }
-
-        let work_completed = machine.work(deadline_reached);
-
-        // log::debug!("raw edits {:?}", machine.mutations.edits);
-
-        let mut machine: DiffMachine<'static> = unsafe { std::mem::transmute(machine) };
-        // let mut saved = machine.save();
-
-        if work_completed {
-            for node in machine.seen_scopes.drain() {
-                // self.dirty_scopes.clear();
-                // self.ui_events.clear();
-                self.dirty_scopes.remove(&node);
-                // self.dirty_scopes.remove(&node);
-            }
-
-            let mut new_mutations = Mutations::new();
-
-            for edit in machine.mutations.edits.drain(..) {
-                new_mutations.edits.push(edit);
-            }
-
-            // for edit in saved.edits.drain(..) {
-            //     new_mutations.edits.push(edit);
-            // }
-
-            // std::mem::swap(&mut new_mutations, &mut saved.mutations);
-
-            mutations.push(new_mutations);
-
-            // log::debug!("saved edits {:?}", mutations);
-
-            let mut saved = machine.save();
-            self.save_work(saved);
-            true
-
-            // self.save_work(saved);
-            // false
-        } else {
-            false
-        }
-    }
-
-    /// The primary workhorse of the VirtualDOM.
-    ///
-    /// Uses some fairly complex logic to schedule what work should be produced.
-    ///
-    /// Returns a list of successful mutations.
-    pub fn work_with_deadline<'a>(
-        &'a mut self,
-        mut deadline: impl FnMut() -> bool,
-    ) -> Vec<Mutations<'a>> {
-        /*
-        Strategy:
-        - When called, check for any UI events that might've been received since the last frame.
-        - Dump all UI events into a "pending discrete" queue and a "pending continuous" queue.
-
-        - If there are any pending discrete events, then elevate our priority level. If our priority level is already "high,"
-            then we need to finish the high priority work first. If the current work is "low" then analyze what scopes
-            will be invalidated by this new work. If this interferes with any in-flight medium or low work, then we need
-            to bump the other work out of the way, or choose to process it so we don't have any conflicts.
-            'static components have a leg up here since their work can be re-used among multiple scopes.
-            "High priority" is only for blocking! Should only be used on "clicks"
-
-        - If there are no pending discrete events, then check for continuous events. These can be completely batched
-
-        - we batch completely until we run into a discrete event
-        - all continuous events are batched together
-        - so D C C C C C would be two separate events - D and C. IE onclick and onscroll
-        - D C C C C C C D C C C D would be D C D C D in 5 distinct phases.
-
-        - !listener bubbling is not currently implemented properly and will need to be implemented somehow in the future
-            - we need to keep track of element parents to be able to traverse properly
-
-
-        Open questions:
-        - what if we get two clicks from the component during the same slice?
-            - should we batch?
-            - react says no - they are continuous
-            - but if we received both - then we don't need to diff, do we? run as many as we can and then finally diff?
-        */
-        let mut committed_mutations = Vec::<Mutations<'static>>::new();
-
-        while self.has_any_work() {
-            while let Ok(Some(msg)) = self.receiver.try_next() {
-                match msg {
-                    SchedulerMsg::Immediate(im) => {
-                        self.dirty_scopes.insert(im);
-                    }
-                    SchedulerMsg::UiEvent(evt) => {
-                        self.ui_events.push_back(evt);
-                    }
-                }
-            }
-
-            // switch our priority, pop off any work
-            while let Some(event) = self.ui_events.pop_front() {
-                if let Some(scope) = self.get_scope_mut(&event.scope) {
-                    if let Some(element) = event.mounted_dom_id {
-                        log::info!("Calling listener {:?}, {:?}", event.scope, element);
-
-                        // TODO: bubble properly here
-                        scope.call_listener(event, element);
-
-                        while let Ok(Some(dirty_scope)) = self.receiver.try_next() {
-                            match dirty_scope {
-                                SchedulerMsg::Immediate(im) => {
-                                    self.dirty_scopes.insert(im);
-                                }
-                                SchedulerMsg::UiEvent(e) => self.ui_events.push_back(e),
-                            }
-                        }
-                    }
-                }
-            }
-
-            let work_complete = self.work_on_current_lane(&mut deadline, &mut committed_mutations);
-
-            if !work_complete {
-                return committed_mutations;
-            }
-        }
-
-        committed_mutations
-    }
-
-    /// Work the scheduler down, not polling any ongoing tasks.
-    ///
-    /// Will use the standard priority-based scheduling, batching, etc, but just won't interact with the async reactor.
-    pub fn work_sync<'a>(&'a mut self) -> Vec<Mutations<'a>> {
-        let mut committed_mutations = Vec::new();
-
-        while let Ok(Some(msg)) = self.receiver.try_next() {
-            self.handle_channel_msg(msg);
-        }
-
-        if !self.has_any_work() {
-            return committed_mutations;
-        }
-
-        while self.has_any_work() {
-            self.prepare_work();
-            self.work_on_current_lane(|| false, &mut committed_mutations);
-        }
-
-        committed_mutations
-    }
-
-    /// Restart the entire VirtualDOM from scratch, wiping away any old state and components.
-    ///
-    /// Typically used to kickstart the VirtualDOM after initialization.
-    pub fn rebuild(&mut self, base_scope: ScopeId) -> Mutations {
-        let mut shared = self.clone();
-        let mut diff_machine = DiffMachine::new(Mutations::new(), &mut shared);
-
-        // TODO: drain any in-flight work
-        let cur_component = self
-            .pool
-            .get_scope_mut(&base_scope)
-            .expect("The base scope should never be moved");
-
-        log::debug!("rebuild {:?}", base_scope);
-
-        // We run the component. If it succeeds, then we can diff it and add the changes to the dom.
-        if cur_component.run_scope(&self) {
-            diff_machine
-                .stack
-                .create_node(cur_component.frames.fin_head(), MountType::Append);
-
-            diff_machine.stack.scope_stack.push(base_scope);
-
-            diff_machine.work(|| false);
-        } else {
-            // todo: should this be a hard error?
-            log::warn!(
-                "Component failed to run successfully during rebuild.
-                This does not result in a failed rebuild, but indicates a logic failure within your app."
-            );
-        }
-
-        unsafe { std::mem::transmute(diff_machine.mutations) }
-    }
-
-    pub fn hard_diff(&mut self, base_scope: ScopeId) -> Mutations {
-        let cur_component = self
-            .pool
-            .get_scope_mut(&base_scope)
-            .expect("The base scope should never be moved");
-
-        log::debug!("hard diff {:?}", base_scope);
-
-        if cur_component.run_scope(&self) {
-            let mut diff_machine = DiffMachine::new(Mutations::new(), &mut self);
-            diff_machine.cfg.force_diff = true;
-            diff_machine.diff_scope(base_scope);
-            diff_machine.mutations
-        } else {
-            Mutations::new()
-        }
-    }
-}
-
-impl Future for Scheduler {
-    type Output = ();
-
-    fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
-        let mut all_pending = true;
-
-        for fut in self.pending_futures.iter() {
-            let scope = self
-                .pool
-                .get_scope_mut(&fut)
-                .expect("Scope should never be moved");
-
-            let items = scope.items.get_mut();
-            for task in items.tasks.iter_mut() {
-                let t = task.as_mut();
-                let g = unsafe { Pin::new_unchecked(t) };
-                match g.poll(cx) {
-                    Poll::Ready(r) => {
-                        all_pending = false;
-                    }
-                    Poll::Pending => {}
-                }
-            }
-        }
-
-        match all_pending {
-            true => Poll::Pending,
-            false => Poll::Ready(()),
-        }
-    }
-}

+ 0 - 73
packages/core/src/old/threadsafe.rs

@@ -1,73 +0,0 @@
-//! A threadsafe wrapper for the VirtualDom
-//!
-//! This is an experimental module, and must be explicitly opted-into.
-//!
-//! It's not guaranteed that this module produces safe results, so use at your own peril.
-//!
-//! The only real "right" answer to a Send VirtualDom is by ensuring all hook data is Send
-//!
-//!
-use std::sync::{Arc, Mutex, MutexGuard};
-
-use crate::VirtualDom;
-
-/// A threadsafe wrapper for the Dioxus VirtualDom.
-///
-/// The Dioxus VirtualDom is not normally `Send` because user code can contain non-`Send` types. However, it is important
-/// to have a VirtualDom that is `Send` when used in server-side code since very few web frameworks support non-send
-/// handlers.
-///
-/// To address this, we have the `ThreadsafeVirtualDom` type which is a threadsafe wrapper for the VirtualDom. To access
-/// the VirtualDom, it must be first unlocked using the `lock` method. This locks the VirtualDom through a mutex and
-/// prevents any user code from leaking out. It is not possible to acquire any non-`Send` types from inside the VirtualDom.
-///
-/// The only way data may be accessed through the VirtualDom is from the "root props" method or by accessing a `Scope`
-/// directly. Even then, it's not possible to access any hook data. This means that non-Send types are only "in play"
-/// while the VirtualDom is locked with a non-Send marker.
-///
-/// Calling "wait for work" on the ThreadsafeVirtualDom does indeed work, because this method only accesses `Send` types.
-/// Otherwise, the VirtualDom must be unlocked on the current thread to modify any data.
-///
-/// Dioxus does have the concept of local tasks and non-local tasks.
-///
-/// For the ThreadsafeVirtualDom, non-Send tasks are not ran - and will error out during a Debug build if one is submitted.
-///
-///
-///
-/// When Tasks are submitted to a thread-local executor,
-///
-pub struct ThreadsafeVirtualDom {
-    inner: Arc<Mutex<VirtualDom>>,
-}
-
-impl ThreadsafeVirtualDom {
-    pub fn new(inner: VirtualDom) -> Self {
-        let inner = Arc::new(Mutex::new(inner));
-        Self { inner }
-    }
-
-    pub fn lock(&self) -> Option<VirtualDomGuard> {
-        let locked = self.inner.lock().unwrap();
-        Some(VirtualDomGuard { guard: locked })
-    }
-}
-
-unsafe impl Send for ThreadsafeVirtualDom {}
-
-pub struct VirtualDomGuard<'a> {
-    guard: MutexGuard<'a, VirtualDom>,
-}
-
-impl<'a> std::ops::Deref for VirtualDomGuard<'a> {
-    type Target = MutexGuard<'a, VirtualDom>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.guard
-    }
-}
-
-impl<'a> std::ops::DerefMut for VirtualDomGuard<'a> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.guard
-    }
-}

+ 8 - 0
packages/core/src/scope.rs

@@ -94,6 +94,14 @@ pub struct SelfReferentialItems<'a> {
     pub(crate) pending_effects: Vec<BumpBox<'a, dyn FnMut()>>,
 }
 
+/// A component's unique identifier.
+///
+/// `ScopeId` is a `usize` that is unique across the entire VirtualDOM - but not unique across time. If a component is
+/// unmounted, then the `ScopeId` will be reused for a new component.
+#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct ScopeId(pub usize);
+
 // Public methods exposed to libraries and components
 impl Scope {
     /// Get the subtree ID that this scope belongs to.

+ 0 - 34
packages/core/src/util.rs

@@ -1,34 +0,0 @@
-use std::cell::Cell;
-use std::fmt::Display;
-
-// create a cell with a "none" value
-#[inline]
-pub fn empty_cell() -> Cell<Option<ElementId>> {
-    Cell::new(None)
-}
-
-/// A component's unique identifier.
-///
-/// `ScopeId` is a `usize` that is unique across the entire VirtualDOM - but not unique across time. If a component is
-/// unmounted, then the `ScopeId` will be reused for a new component.
-#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct ScopeId(pub usize);
-
-/// An Element's unique identifier.
-///
-/// `ElementId` is a `usize` that is unique across the entire VirtualDOM - but not unique across time. If a component is
-/// unmounted, then the `ElementId` will be reused for a new component.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct ElementId(pub usize);
-impl Display for ElementId {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.0)
-    }
-}
-
-impl ElementId {
-    pub fn as_u64(self) -> u64 {
-        self.0 as u64
-    }
-}

+ 4 - 9
packages/core/src/virtual_dom.rs

@@ -447,18 +447,13 @@ impl VirtualDom {
 
         let scope_id = self.base_scope;
         if self.scopes.run_scope(&scope_id) {
-            todo!();
-            // diff_state
-            //     .stack
-            //     .create_node(self.scopes.fin_head(&scope_id), MountType::Append);
+            diff_state
+                .stack
+                .create_node(self.scopes.fin_head(&scope_id), MountType::Append);
 
-            // diff_state.stack.scope_stack.push(scope_id);
+            diff_state.stack.scope_stack.push(scope_id);
 
             let work_completed = diff_state.work(|| false);
-            // let scopes = &mut self.scopes;
-            //
-            // let work_completed = crate::diff::work(&mut diff_state, || false);
-            // self.scopes.work(&mut diff_machine, || false);
         }
 
         diff_state.mutations