
Merge branch 'main' into jk/fix-form-inputs

Jonathan Kelley · 1 year ago
parent
commit 44833c471f

+ 7 - 4
.github/ISSUE_TEMPLATE/feature_requst.md

@@ -3,14 +3,17 @@ name: Feature Request
 about: If you have any interesting advice, you can tell us.
 ---
 
-## Specific Demand
+## Feature Request
 
 <!--
-What feature do you need, please describe it in detail.
+Describe the issue in detail and why we should add it. To help us out, please poke through our issue tracker and make sure it's not a duplicate issue.
+
+
+Please add the corresponding labels to the issue.
 -->
 
 ## Implement Suggestion
 
 <!--
-If you have any suggestion for complete this feature, you can tell us.
--->
+If you have any suggestions on how to design this feature or any prior art, list them here.
+-->

+ 3 - 2
packages/core/src/arena.rs

@@ -1,4 +1,5 @@
-use crate::{innerlude::DirtyScope, virtual_dom::VirtualDom, ScopeId};
+use crate::innerlude::ScopeOrder;
+use crate::{virtual_dom::VirtualDom, ScopeId};
 
 /// An Element's unique identifier.
 ///
@@ -74,7 +75,7 @@ impl VirtualDom {
             context.height
         };
 
-        self.dirty_scopes.remove(&DirtyScope { height, id });
+        self.dirty_scopes.remove(&ScopeOrder::new(height, id));
     }
 }
 

+ 2 - 5
packages/core/src/diff/component.rs

@@ -2,7 +2,7 @@ use std::ops::{Deref, DerefMut};
 
 use crate::{
     any_props::AnyProps,
-    innerlude::{DirtyScope, ElementRef, MountId, VComponent, WriteMutations},
+    innerlude::{ElementRef, MountId, ScopeOrder, VComponent, WriteMutations},
     nodes::RenderReturn,
     nodes::VNode,
     scopes::ScopeId,
@@ -91,10 +91,7 @@ impl VNode {
         dom.diff_scope(to, scope_id, new);
 
         let height = dom.runtime.get_state(scope_id).unwrap().height;
-        dom.dirty_scopes.remove(&DirtyScope {
-            height,
-            id: scope_id,
-        });
+        dom.dirty_scopes.remove(&ScopeOrder::new(height, scope_id));
     }
 
     fn replace_vcomponent(

+ 201 - 11
packages/core/src/dirty_scope.rs

@@ -1,33 +1,223 @@
-use std::hash::Hash;
+//! Dioxus resolves scopes in a specific order to avoid unexpected behavior. All tasks are resolved in the order of height. Scopes that are higher up in the tree are resolved first.
+//! When a scope that is higher up in the tree is rerendered, it may drop scopes lower in the tree along with their tasks.
+//!
+//! ```rust
+//! use dioxus::prelude::*;
+//!
+//! fn app() -> Element {
+//!     let vec = use_signal(|| vec![0; 10]);
+//!     rsx! {
+//!         // If the length of the vec shrinks, we need to make sure the children are dropped along with their tasks before the new state of the vec is read
+//!         for idx in 0..vec.len() {
+//!             Child { idx, vec }
+//!         }
+//!     }
+//! }
+//!
+//! #[component]
+//! fn Child(vec: Signal<Vec<usize>>, idx: usize) -> Element {
+//!     use_hook(move || {
+//!         spawn(async move {
+//!             // If we let this task run after the child is dropped, it will panic.
+//!             println!("Task {}", vec.read()[idx]);
+//!         });
+//!     });
+//!
+//!     rsx! {}
+//! }
+//! ```
 
 use crate::ScopeId;
+use crate::Task;
+use std::borrow::Borrow;
+use std::cell::RefCell;
+use std::collections::BTreeSet;
+use std::hash::Hash;
 
-#[derive(Debug, Clone, Eq)]
-pub struct DirtyScope {
-    pub height: u32,
-    pub id: ScopeId,
+#[derive(Debug, Clone, Copy, Eq)]
+pub struct ScopeOrder {
+    pub(crate) height: u32,
+    pub(crate) id: ScopeId,
+}
+
+impl ScopeOrder {
+    pub fn new(height: u32, id: ScopeId) -> Self {
+        Self { height, id }
+    }
+}
+
+impl PartialEq for ScopeOrder {
+    fn eq(&self, other: &Self) -> bool {
+        self.id == other.id
+    }
 }
 
-impl PartialOrd for DirtyScope {
+impl PartialOrd for ScopeOrder {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         Some(self.cmp(other))
     }
 }
 
-impl Ord for DirtyScope {
+impl Ord for ScopeOrder {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
         self.height.cmp(&other.height).then(self.id.cmp(&other.id))
     }
 }
 
-impl PartialEq for DirtyScope {
+impl Hash for ScopeOrder {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.id.hash(state);
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct DirtyScopes {
+    pub(crate) scopes: BTreeSet<ScopeOrder>,
+    pub(crate) tasks: BTreeSet<DirtyTasks>,
+}
+
+impl DirtyScopes {
+    /// Queue a task to be polled
+    pub fn queue_task(&mut self, task: Task, order: ScopeOrder) {
+        match self.tasks.get(&order) {
+            Some(scope) => scope.queue_task(task),
+            None => {
+                let scope = DirtyTasks::from(order);
+                scope.queue_task(task);
+                self.tasks.insert(scope);
+            }
+        }
+    }
+
+    /// Queue a scope to be rerendered
+    pub fn queue_scope(&mut self, order: ScopeOrder) {
+        self.scopes.insert(order);
+    }
+
+    /// Check if there are any dirty scopes
+    pub fn has_dirty_scopes(&self) -> bool {
+        !self.scopes.is_empty()
+    }
+
+    /// Take any tasks from the highest scope
+    pub fn pop_task(&mut self) -> Option<DirtyTasks> {
+        self.tasks.pop_first()
+    }
+
+    /// Take any work from the highest scope. This may include rerunning the scope and/or running tasks
+    pub fn pop_work(&mut self) -> Option<Work> {
+        let dirty_scope = self.scopes.first();
+        let dirty_task = self.tasks.first();
+        match (dirty_scope, dirty_task) {
+            (Some(scope), Some(task)) => {
+                let tasks_order = task.borrow();
+                match scope.cmp(tasks_order) {
+                    std::cmp::Ordering::Less => {
+                        let scope = self.scopes.pop_first().unwrap();
+                        Some(Work {
+                            scope,
+                            rerun_scope: true,
+                            tasks: Vec::new(),
+                        })
+                    }
+                    std::cmp::Ordering::Greater => {
+                        let task = self.tasks.pop_first().unwrap();
+                        Some(Work {
+                            scope: task.order,
+                            rerun_scope: false,
+                            tasks: task.tasks_queued.into_inner(),
+                        })
+                    }
+                    std::cmp::Ordering::Equal => {
+                        let scope = self.scopes.pop_first().unwrap();
+                        let task = self.tasks.pop_first().unwrap();
+                        Some(Work {
+                            scope,
+                            rerun_scope: true,
+                            tasks: task.tasks_queued.into_inner(),
+                        })
+                    }
+                }
+            }
+            (Some(_), None) => {
+                let scope = self.scopes.pop_first().unwrap();
+                Some(Work {
+                    scope,
+                    rerun_scope: true,
+                    tasks: Vec::new(),
+                })
+            }
+            (None, Some(_)) => {
+                let task = self.tasks.pop_first().unwrap();
+                Some(Work {
+                    scope: task.order,
+                    rerun_scope: false,
+                    tasks: task.tasks_queued.into_inner(),
+                })
+            }
+            (None, None) => None,
+        }
+    }
+
+    pub fn remove(&mut self, scope: &ScopeOrder) {
+        self.scopes.remove(scope);
+    }
+}
+
+#[derive(Debug)]
+pub struct Work {
+    pub scope: ScopeOrder,
+    pub rerun_scope: bool,
+    pub tasks: Vec<Task>,
+}
+
+#[derive(Debug, Clone, Eq)]
+pub(crate) struct DirtyTasks {
+    pub order: ScopeOrder,
+    pub tasks_queued: RefCell<Vec<Task>>,
+}
+
+impl From<ScopeOrder> for DirtyTasks {
+    fn from(order: ScopeOrder) -> Self {
+        Self {
+            order,
+            tasks_queued: Vec::new().into(),
+        }
+    }
+}
+
+impl DirtyTasks {
+    pub fn queue_task(&self, task: Task) {
+        self.tasks_queued.borrow_mut().push(task);
+    }
+}
+
+impl Borrow<ScopeOrder> for DirtyTasks {
+    fn borrow(&self) -> &ScopeOrder {
+        &self.order
+    }
+}
+
+impl PartialOrd for DirtyTasks {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.order.cmp(&other.order))
+    }
+}
+
+impl Ord for DirtyTasks {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.order.cmp(&other.order)
+    }
+}
+
+impl PartialEq for DirtyTasks {
     fn eq(&self, other: &Self) -> bool {
-        self.id == other.id
+        self.order == other.order
     }
 }
 
-impl Hash for DirtyScope {
+impl Hash for DirtyTasks {
     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
-        self.id.hash(state);
+        self.order.hash(state);
     }
 }

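The new `DirtyScopes` container keeps two ordered sets, one of scopes queued for a rerender and one of scopes with queued tasks, and `pop_work` always drains whichever entry sits highest in the tree (lowest height) first, merging the two when they refer to the same scope. A minimal standalone sketch of that priority-merge pattern, using a hypothetical stand-in `Order` type and `pop_next` helper since `ScopeOrder`, `DirtyScopes`, and `Work` are crate-private:

```rust
use std::collections::BTreeSet;

// Stand-in for ScopeOrder: entries sort by height first, so parents come out before children.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Order {
    height: u32,
    id: u32,
}

/// Pop the highest-priority entry across both sets.
/// Returns (order, rerun_scope, run_tasks), mirroring the shape of `Work`.
fn pop_next(
    scopes: &mut BTreeSet<Order>,
    tasks: &mut BTreeSet<Order>,
) -> Option<(Order, bool, bool)> {
    match (scopes.first().copied(), tasks.first().copied()) {
        (Some(s), Some(t)) if s < t => { scopes.pop_first(); Some((s, true, false)) }
        (Some(s), Some(t)) if t < s => { tasks.pop_first(); Some((t, false, true)) }
        // The same scope is both dirty and has tasks queued: take both at once.
        (Some(s), Some(_)) => { scopes.pop_first(); tasks.pop_first(); Some((s, true, true)) }
        (Some(s), None) => { scopes.pop_first(); Some((s, true, false)) }
        (None, Some(t)) => { tasks.pop_first(); Some((t, false, true)) }
        (None, None) => None,
    }
}

fn main() {
    let mut scopes = BTreeSet::from([Order { height: 2, id: 7 }]);
    let mut tasks = BTreeSet::from([Order { height: 1, id: 3 }]);

    // The task queued at height 1 is handled before the dirty scope at height 2.
    assert_eq!(pop_next(&mut scopes, &mut tasks), Some((Order { height: 1, id: 3 }, false, true)));
    assert_eq!(pop_next(&mut scopes, &mut tasks), Some((Order { height: 2, id: 7 }, true, false)));
    assert_eq!(pop_next(&mut scopes, &mut tasks), None);
}
```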
+ 4 - 5
packages/core/src/global_context.rs

@@ -250,19 +250,18 @@ pub fn after_render(f: impl FnMut() + 'static) {
     Runtime::with_current_scope(|cx| cx.push_after_render(f));
 }
 
-/// Wait for the virtualdom to finish its sync work before proceeding
+/// Wait for the next render to complete
 ///
 /// This is useful if you've just triggered an update and want to wait for it to finish before proceeding with valid
 /// DOM nodes.
 ///
-/// Effects rely on this to ensure that they only run effects after the DOM has been updated. Without flush_sync effects
+/// Effects rely on this to ensure that they only run after the DOM has been updated. Without wait_for_next_render, effects
 /// are run immediately before diffing the DOM, which causes all sorts of out-of-sync weirdness.
-pub async fn flush_sync() {
+pub async fn wait_for_next_render() {
     // Wait for the flush lock to be available
     // We release it immediately, so it's impossible for the lock to be held longer than this function
-    Runtime::with(|rt| rt.flush_mutex.clone())
+    Runtime::with(|rt| rt.render_signal.subscribe())
         .unwrap()
-        .lock()
         .await;
 }
 

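From user code the rename is mostly mechanical: `wait_for_next_render` (exported from the prelude in the `lib.rs` change below) resolves once the virtual dom has committed a render. A minimal usage sketch, assuming the dioxus 0.5 signal and rsx APIs that this branch's own doc example uses (`use_signal`, `use_hook`, `spawn`, `peek`):

```rust
use dioxus::prelude::*;

fn app() -> Element {
    let mut count = use_signal(|| 0);

    // An effect-like task: it only observes state after the dom has finished rendering.
    use_hook(move || {
        spawn(async move {
            loop {
                wait_for_next_render().await;
                // peek() reads without subscribing, so this task doesn't retrigger renders.
                println!("rendered with count = {}", *count.peek());
            }
        })
    });

    rsx! {
        button { onclick: move |_| count += 1, "count is {count}" }
    }
}
```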
+ 8 - 7
packages/core/src/lib.rs

@@ -14,6 +14,7 @@ mod global_context;
 mod mutations;
 mod nodes;
 mod properties;
+mod render_signal;
 mod runtime;
 mod scope_arena;
 mod scope_context;
@@ -87,14 +88,14 @@ pub use crate::innerlude::{
 /// This includes types like [`Element`], and [`Component`].
 pub mod prelude {
     pub use crate::innerlude::{
-        consume_context, consume_context_from_scope, current_scope_id, fc_to_builder, flush_sync,
-        generation, has_context, needs_update, needs_update_any, parent_scope, provide_context,
+        consume_context, consume_context_from_scope, current_scope_id, fc_to_builder, generation,
+        has_context, needs_update, needs_update_any, parent_scope, provide_context,
         provide_root_context, remove_future, schedule_update, schedule_update_any, spawn,
         spawn_forever, suspend, try_consume_context, use_after_render, use_before_render, use_drop,
-        use_error_boundary, use_hook, use_hook_with_cleanup, AnyValue, Attribute, Component,
-        ComponentFunction, Element, ErrorBoundary, Event, EventHandler, Fragment, HasAttributes,
-        IntoAttributeValue, IntoDynNode, OptionStringFromMarker, Properties, Runtime, RuntimeGuard,
-        ScopeId, ScopeState, SuperFrom, SuperInto, Task, Template, TemplateAttribute, TemplateNode,
-        Throw, VNode, VNodeInner, VirtualDom,
+        use_error_boundary, use_hook, use_hook_with_cleanup, wait_for_next_render, AnyValue,
+        Attribute, Component, ComponentFunction, Element, ErrorBoundary, Event, EventHandler,
+        Fragment, HasAttributes, IntoAttributeValue, IntoDynNode, OptionStringFromMarker,
+        Properties, Runtime, RuntimeGuard, ScopeId, ScopeState, SuperFrom, SuperInto, Task,
+        Template, TemplateAttribute, TemplateNode, Throw, VNode, VNodeInner, VirtualDom,
     };
 }

+ 62 - 0
packages/core/src/render_signal.rs

@@ -0,0 +1,62 @@
+//! In dioxus, effects are run using normal async functions after a render. [RenderSignalFuture] is a future that resolves after a render has passed.
+
+use std::cell::RefCell;
+use std::future::Future;
+use std::pin::Pin;
+use std::rc::Rc;
+use std::task::Context;
+use std::task::Poll;
+use std::task::Waker;
+
+/// A signal is a message that can be sent to all listening tasks at once
+#[derive(Default)]
+pub struct RenderSignal {
+    wakers: Rc<RefCell<Vec<Rc<RefCell<RenderSignalFutureInner>>>>>,
+}
+
+impl RenderSignal {
+    /// Send the signal to all listening tasks
+    pub fn send(&self) {
+        let mut wakers = self.wakers.borrow_mut();
+        for waker in wakers.drain(..) {
+            let mut inner = waker.borrow_mut();
+            inner.resolved = true;
+            if let Some(waker) = inner.waker.take() {
+                waker.wake();
+            }
+        }
+    }
+
+    /// Create a future that resolves when the signal is sent
+    pub fn subscribe(&self) -> RenderSignalFuture {
+        let inner = Rc::new(RefCell::new(RenderSignalFutureInner {
+            resolved: false,
+            waker: None,
+        }));
+        self.wakers.borrow_mut().push(inner.clone());
+        RenderSignalFuture { inner }
+    }
+}
+
+struct RenderSignalFutureInner {
+    resolved: bool,
+    waker: Option<Waker>,
+}
+
+pub(crate) struct RenderSignalFuture {
+    inner: Rc<RefCell<RenderSignalFutureInner>>,
+}
+
+impl Future for RenderSignalFuture {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+        let mut inner = self.inner.borrow_mut();
+        if inner.resolved {
+            Poll::Ready(())
+        } else {
+            inner.waker = Some(cx.waker().clone());
+            Poll::Pending
+        }
+    }
+}

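Since `RenderSignal` and its future are crate-private, their contract is easiest to pin down with a unit test. A sketch of a test that could sit at the bottom of `render_signal.rs` (hypothetical, not part of this commit; `noop_waker` comes from the already-present `futures_util` dependency): futures subscribed before `send` resolve on the next poll, while later subscribers wait for the next send.

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use futures_util::task::noop_waker;
    use std::future::Future;
    use std::task::{Context, Poll};

    #[test]
    fn send_wakes_only_current_subscribers() {
        let signal = RenderSignal::default();
        let waker = noop_waker();
        let mut cx = Context::from_waker(&waker);

        // Subscribe before the render completes: still pending.
        let mut before = Box::pin(signal.subscribe());
        assert_eq!(before.as_mut().poll(&mut cx), Poll::Pending);

        // Mark the render as finished: the earlier subscriber now resolves.
        signal.send();
        assert_eq!(before.as_mut().poll(&mut cx), Poll::Ready(()));

        // A subscriber created after the send waits for the *next* render.
        let mut after = Box::pin(signal.subscribe());
        assert_eq!(after.as_mut().poll(&mut cx), Poll::Pending);
    }
}
```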
+ 4 - 35
packages/core/src/runtime.rs

@@ -1,5 +1,6 @@
 use crate::{
     innerlude::{LocalTask, SchedulerMsg},
+    render_signal::RenderSignal,
     scope_context::Scope,
     scopes::ScopeId,
     Task,
@@ -7,7 +8,6 @@ use crate::{
 use std::{
     cell::{Cell, Ref, RefCell},
     rc::Rc,
-    sync::Arc,
 };
 
 thread_local! {
@@ -31,18 +31,15 @@ pub struct Runtime {
 
     pub(crate) sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
 
-    // the virtualdom will hold this lock while it's doing syncronous work
-    // when the lock is lifted, tasks waiting for the lock will be able to run
-    pub(crate) flush_mutex: Arc<futures_util::lock::Mutex<()>>,
-    pub(crate) flush_lock: Cell<Option<futures_util::lock::OwnedMutexGuard<()>>>,
+    // Synchronous tasks need to run after the next render. The virtual dom stores a list of those tasks so it can signal them when the next render is done.
+    pub(crate) render_signal: RenderSignal,
 }
 
 impl Runtime {
     pub(crate) fn new(sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>) -> Rc<Self> {
         Rc::new(Self {
             sender,
-            flush_mutex: Default::default(),
-            flush_lock: Default::default(),
+            render_signal: RenderSignal::default(),
             rendering: Cell::new(true),
             scope_states: Default::default(),
             scope_stack: Default::default(),
@@ -149,34 +146,6 @@ impl Runtime {
     pub(crate) fn with_scope<R>(scope: ScopeId, f: impl FnOnce(&Scope) -> R) -> Option<R> {
         Self::with(|rt| rt.get_state(scope).map(|sc| f(&sc))).flatten()
     }
-
-    /// Acquire the flush lock and store it interally
-    ///
-    /// This means the virtual dom is currently doing syncronous work
-    /// The lock will be held until `release_flush_lock` is called - and then the OwnedLock will be dropped
-    pub(crate) fn acquire_flush_lock(&self) {
-        // The flush lock might already be held...
-        if let Some(lock) = self.flush_mutex.try_lock_owned() {
-            self.flush_lock.set(Some(lock));
-        }
-    }
-
-    /// Release the flush lock
-    ///
-    /// On the drop of the flush lock, all tasks waiting on `flush_sync` will spring to life via their wakers.
-    /// You can now freely poll those tasks and they can progress
-    pub(crate) fn release_flush_lock(&self) {
-        self.flush_lock.take();
-    }
-
-    /// Dispatch an event against the current runtime
-    ///
-    /// This won't do any diffing or anything, just calling event listeners syncronously
-    /// We expose this via the runtime so that preventDefault, stopPropagation, and other event methods can be called
-    /// while the event is still active.
-    pub fn dispatch_event(&self) {
-        todo!()
-    }
 }
 
 /// A guard for a new runtime. This must be used to override the current runtime when importing components from a dynamic library that has it's own runtime.

+ 4 - 5
packages/core/src/scope_arena.rs

@@ -1,6 +1,7 @@
+use crate::innerlude::ScopeOrder;
 use crate::{
     any_props::{AnyProps, BoxedAnyProps},
-    innerlude::{DirtyScope, ScopeState},
+    innerlude::ScopeState,
     nodes::RenderReturn,
     scope_context::Scope,
     scopes::ScopeId,
@@ -66,10 +67,8 @@ impl VirtualDom {
         context.render_count.set(context.render_count.get() + 1);
 
         // remove this scope from dirty scopes
-        self.dirty_scopes.remove(&DirtyScope {
-            height: context.height,
-            id: context.id,
-        });
+        self.dirty_scopes
+            .remove(&ScopeOrder::new(context.height, scope_id));
 
         if context.suspended.get() {
             if matches!(new_nodes, RenderReturn::Aborted(_)) {

+ 4 - 0
packages/core/src/tasks.rs

@@ -135,6 +135,10 @@ impl Runtime {
         self.tasks.borrow().get(task.0)?.parent
     }
 
+    pub(crate) fn task_scope(&self, task: Task) -> Option<ScopeId> {
+        self.tasks.borrow().get(task.0).map(|t| t.scope)
+    }
+
     pub(crate) fn handle_task_wakeup(&self, id: Task) -> Poll<()> {
         debug_assert!(Runtime::current().is_some(), "Must be in a dioxus runtime");
 

+ 87 - 42
packages/core/src/virtual_dom.rs

@@ -2,12 +2,14 @@
 //!
 //! This module provides the primary mechanics to create a hook-based, concurrent VDOM for Rust.
 
+use crate::innerlude::ScopeOrder;
+use crate::Task;
 use crate::{
     any_props::AnyProps,
     arena::ElementId,
     innerlude::{
-        DirtyScope, ElementRef, ErrorBoundary, NoOpMutations, SchedulerMsg, ScopeState, VNodeMount,
-        VProps, WriteMutations,
+        DirtyScopes, ElementRef, ErrorBoundary, NoOpMutations, SchedulerMsg, ScopeState,
+        VNodeMount, VProps, WriteMutations,
     },
     nodes::RenderReturn,
     nodes::{Template, TemplateId},
@@ -18,7 +20,7 @@ use crate::{
 use futures_util::StreamExt;
 use rustc_hash::{FxHashMap, FxHashSet};
 use slab::Slab;
-use std::{any::Any, collections::BTreeSet, rc::Rc};
+use std::{any::Any, rc::Rc};
 use tracing::instrument;
 
 /// A virtual node system that progresses user events and diffs UI trees.
@@ -183,7 +185,7 @@ use tracing::instrument;
 pub struct VirtualDom {
     pub(crate) scopes: Slab<ScopeState>,
 
-    pub(crate) dirty_scopes: BTreeSet<DirtyScope>,
+    pub(crate) dirty_scopes: DirtyScopes,
 
     // Maps a template path to a map of byte indexes to templates
     pub(crate) templates: FxHashMap<TemplateId, FxHashMap<usize, Template>>,
@@ -376,17 +378,29 @@ impl VirtualDom {
             return;
         };
 
+        tracing::event!(tracing::Level::TRACE, "Marking scope {:?} as dirty", id);
+        let order = ScopeOrder::new(scope.height(), id);
+        self.dirty_scopes.queue_scope(order);
+    }
+
+    /// Mark a task as dirty
+    fn mark_task_dirty(&mut self, task: Task) {
+        let Some(scope) = self.runtime.task_scope(task) else {
+            return;
+        };
+        let Some(scope) = self.runtime.get_state(scope) else {
+            return;
+        };
+
         tracing::event!(
             tracing::Level::TRACE,
-            "Marking scope {:?} ({}) as dirty",
-            id,
-            scope.name
+            "Marking task {:?} (spawned in {:?}) as dirty",
+            task,
+            scope.id
         );
 
-        self.dirty_scopes.insert(DirtyScope {
-            height: scope.height(),
-            id,
-        });
+        let order = ScopeOrder::new(scope.height(), scope.id);
+        self.dirty_scopes.queue_task(task, order);
     }
 
     /// Call a listener inside the VirtualDom with data from outside the VirtualDom. **The ElementId passed in must be the id of an element with a listener, not a static node or a text node.**
@@ -438,50 +452,73 @@ impl VirtualDom {
         self.poll_tasks().await;
     }
 
-    ///
+    /// Poll the scheduler for any work
     #[instrument(skip(self), level = "trace", name = "VirtualDom::poll_tasks")]
     async fn poll_tasks(&mut self) {
-        // Release the flush lock
-        // This will cause all the flush wakers to immediately spring to life, which we will off with process_events
-        self.runtime.release_flush_lock();
-
         loop {
             // Process all events - Scopes are marked dirty, etc
             // Sometimes when wakers fire we get a slew of updates at once, so its important that we drain this completely
             self.process_events();
 
             // Now that we have collected all queued work, we should check if we have any dirty scopes. If there are not, then we can poll any queued futures
-            if !self.dirty_scopes.is_empty() {
+            if self.dirty_scopes.has_dirty_scopes() {
                 return;
             }
 
             // Make sure we set the runtime since we're running user code
             let _runtime = RuntimeGuard::new(self.runtime.clone());
 
-            // Hold a lock to the flush sync to prevent tasks from running in the event we get an immediate
-            // When we're doing awaiting the rx, the lock will be dropped and tasks waiting on the lock will get waked
-            // We have to own the lock since poll_tasks is cancel safe - the future that this is running in might get dropped
-            // and if we held the lock in the scope, the lock would also get dropped prematurely
-            self.runtime.release_flush_lock();
-            self.runtime.acquire_flush_lock();
-
+            // There isn't any more work we can do synchronously. Wait for any new work to be ready
             match self.rx.next().await.expect("channel should never close") {
                 SchedulerMsg::Immediate(id) => self.mark_dirty(id),
-                SchedulerMsg::TaskNotified(id) => _ = self.runtime.handle_task_wakeup(id),
+                SchedulerMsg::TaskNotified(id) => {
+                    // Instead of running the task immediately, we insert it into the runtime's task queue.
+                    // The task may be marked dirty at the same time as the scope that owns the task is dropped.
+                    self.mark_task_dirty(id);
+                }
             };
         }
     }
 
+    /// Queue any pending events
+    fn queue_events(&mut self) {
+        // Prevent a task from deadlocking the runtime by repeatedly queueing itself
+        while let Ok(Some(msg)) = self.rx.try_next() {
+            match msg {
+                SchedulerMsg::Immediate(id) => self.mark_dirty(id),
+                SchedulerMsg::TaskNotified(task) => self.mark_task_dirty(task),
+            }
+        }
+    }
+
     /// Process all events in the queue until there are no more left
     #[instrument(skip(self), level = "trace", name = "VirtualDom::process_events")]
     pub fn process_events(&mut self) {
         let _runtime = RuntimeGuard::new(self.runtime.clone());
+        self.queue_events();
 
-        // Prevent a task from deadlocking the runtime by repeatedly queueing itself
-        while let Ok(Some(msg)) = self.rx.try_next() {
-            match msg {
-                SchedulerMsg::Immediate(id) => self.mark_dirty(id),
-                SchedulerMsg::TaskNotified(task) => _ = self.runtime.handle_task_wakeup(task),
+        // Now that we have collected all queued work, we should check if we have any dirty scopes. If there are not, then we can poll any queued futures
+        if self.dirty_scopes.has_dirty_scopes() {
+            return;
+        }
+
+        // Next, run any queued tasks
+        // We choose not to poll the deadline since we complete pretty quickly anyways
+        while let Some(task) = self.dirty_scopes.pop_task() {
+            // If the scope doesn't exist for whatever reason, then we should skip it
+            if !self.scopes.contains(task.order.id.0) {
+                continue;
+            }
+
+            // Then poll any tasks that might be pending
+            let tasks = task.tasks_queued.into_inner();
+            for task in tasks {
+                let _ = self.runtime.handle_task_wakeup(task);
+                // Running that task may mark a scope higher up as dirty. If it does, return from the function early
+                self.queue_events();
+                if self.dirty_scopes.has_dirty_scopes() {
+                    return;
+                }
             }
         }
     }
@@ -496,20 +533,19 @@ impl VirtualDom {
     pub fn replace_template(&mut self, template: Template) {
         self.register_template_first_byte_index(template);
         // iterating a slab is very inefficient, but this is a rare operation that will only happen during development so it's fine
-        for (_, scope) in self.scopes.iter() {
+        let mut dirty = Vec::new();
+        for (id, scope) in self.scopes.iter() {
             if let Some(RenderReturn::Ready(sync)) = scope.try_root_node() {
                 if sync.template.get().name.rsplit_once(':').unwrap().0
                     == template.name.rsplit_once(':').unwrap().0
                 {
-                    let context = scope.state();
-                    let height = context.height;
-                    self.dirty_scopes.insert(DirtyScope {
-                        height,
-                        id: context.id,
-                    });
+                    dirty.push(ScopeId(id));
                 }
             }
         }
+        for dirty in dirty {
+            self.mark_dirty(dirty);
+        }
     }
 
     /// Rebuild the virtualdom without handling any of the mutations
@@ -572,20 +608,29 @@ impl VirtualDom {
 
         // Next, diff any dirty scopes
         // We choose not to poll the deadline since we complete pretty quickly anyways
-        while let Some(dirty) = self.dirty_scopes.pop_first() {
+        while let Some(work) = self.dirty_scopes.pop_work() {
             // If the scope doesn't exist for whatever reason, then we should skip it
-            if !self.scopes.contains(dirty.id.0) {
+            if !self.scopes.contains(work.scope.id.0) {
                 continue;
             }
 
             {
                 let _runtime = RuntimeGuard::new(self.runtime.clone());
-                // Run the scope and get the mutations
-                let new_nodes = self.run_scope(dirty.id);
+                // Then, poll any tasks that might be pending in the scope
+                // This will run effects, so this **must** be done after the scope is diffed
+                for task in work.tasks {
+                    let _ = self.runtime.handle_task_wakeup(task);
+                }
+                // If the scope is dirty, run the scope and get the mutations
+                if work.rerun_scope {
+                    let new_nodes = self.run_scope(work.scope.id);
 
-                self.diff_scope(to, dirty.id, new_nodes);
+                    self.diff_scope(to, work.scope.id, new_nodes);
+                }
             }
         }
+
+        self.runtime.render_signal.send();
     }
 
     /// [`Self::render_immediate`] to a vector of mutations for testing purposes

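The observable effect of the `virtual_dom.rs` changes shows up in the usual headless driver loop, which the new tests below exercise directly: `wait_for_work` now only queues task wakeups, and `render_immediate` both runs the queued work and fires `render_signal.send()` to wake anything parked in `wait_for_next_render`. A minimal driver sketch mirroring the tests, assuming `dioxus_core` as a direct dependency (as in the tests) and tokio as the executor:

```rust
use dioxus::prelude::*;
use dioxus_core::NoOpMutations;

fn app() -> Element {
    rsx! { "hello" }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let mut dom = VirtualDom::new(app);

    // Initial build; mutations are discarded since there is no real renderer here.
    dom.rebuild(&mut NoOpMutations);

    loop {
        // Wait until some scope or task is marked dirty...
        dom.wait_for_work().await;

        // ...then run queued tasks and diff dirty scopes. This is also where the
        // render signal fires, waking tasks blocked on wait_for_next_render().
        dom.render_immediate(&mut NoOpMutations);
    }
}
```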
+ 52 - 0
packages/core/tests/children_drop_futures.rs

@@ -0,0 +1,52 @@
+//! Verify that when children are dropped, they drop their futures before they are polled
+
+use std::{sync::atomic::AtomicUsize, time::Duration};
+
+use dioxus::prelude::*;
+
+#[tokio::test]
+async fn child_futures_drop_first() {
+    static POLL_COUNT: AtomicUsize = AtomicUsize::new(0);
+
+    fn app() -> Element {
+        if generation() == 0 {
+            rsx! {Child {}}
+        } else {
+            rsx! {}
+        }
+    }
+
+    #[component]
+    fn Child() -> Element {
+        // Spawn a task that increments POLL_COUNT once when it is polled
+        // It should be dropped when the parent reruns and removes this child, before the task is ever polled
+        use_hook(|| {
+            spawn(async {
+                POLL_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+            });
+        });
+
+        rsx! {}
+    }
+
+    let mut dom = VirtualDom::new(app);
+
+    dom.rebuild(&mut dioxus_core::NoOpMutations);
+
+    // Here the parent and the task could resolve at the same time, but because the task lives in the child, dioxus should run the parent first since the child might be dropped
+    dom.mark_dirty(ScopeId::ROOT);
+
+    tokio::select! {
+        _ = dom.wait_for_work() => {}
+        _ = tokio::time::sleep(Duration::from_millis(500)) => panic!("timed out")
+    };
+
+    dom.render_immediate(&mut dioxus_core::NoOpMutations);
+
+    // The child (and its task) was dropped before the task was ever polled,
+    // so POLL_COUNT should still be zero
+    assert_eq!(
+        POLL_COUNT.fetch_add(0, std::sync::atomic::Ordering::Relaxed),
+        0
+    );
+}

+ 52 - 11
packages/core/tests/task.rs

@@ -84,18 +84,29 @@ async fn yield_now_works() {
     SEQUENCE.with(|s| assert_eq!(s.borrow().len(), 20));
 }
 
-/// Ensure that calling wait_for_flush waits for dioxus to finish its syncrhonous work
+/// Ensure that calling wait_for_next_render waits for dioxus to finish its synchronous work
 #[tokio::test]
 async fn flushing() {
     thread_local! {
         static SEQUENCE: std::cell::RefCell<Vec<usize>> = std::cell::RefCell::new(Vec::new());
+        static BROADCAST: (tokio::sync::broadcast::Sender<()>, tokio::sync::broadcast::Receiver<()>) = tokio::sync::broadcast::channel(1);
     }
 
     fn app() -> Element {
+        if generation() > 0 {
+            println!("App");
+            SEQUENCE.with(|s| s.borrow_mut().push(0));
+        }
+
+        // The next two tasks mimic effects. They should only be run after the app has been rendered
         use_hook(|| {
             spawn(async move {
+                let mut channel = BROADCAST.with(|b| b.1.resubscribe());
                 for _ in 0..10 {
-                    flush_sync().await;
+                    wait_for_next_render().await;
+                    println!("Task 1 received");
+                    channel.recv().await.unwrap();
+                    println!("Task 1");
                     SEQUENCE.with(|s| s.borrow_mut().push(1));
                 }
             })
@@ -103,14 +114,18 @@ async fn flushing() {
 
         use_hook(|| {
             spawn(async move {
+                let mut channel = BROADCAST.with(|b| b.1.resubscribe());
                 for _ in 0..10 {
-                    flush_sync().await;
+                    wait_for_next_render().await;
+                    println!("Task 2 received");
+                    channel.recv().await.unwrap();
+                    println!("Task 2");
                     SEQUENCE.with(|s| s.borrow_mut().push(2));
                 }
             })
         });
 
-        rsx!({})
+        rsx! {}
     }
 
     let mut dom = VirtualDom::new(app);
@@ -119,18 +134,44 @@ async fn flushing() {
 
     let fut = async {
         // Trigger the flush by waiting for work
-        for _ in 0..40 {
-            tokio::select! {
-                _ = dom.wait_for_work() => {}
-                _ = tokio::time::sleep(Duration::from_millis(1)) => {}
-            };
+        for i in 0..10 {
+            BROADCAST.with(|b| b.0.send(()).unwrap());
+            dom.mark_dirty(ScopeId(0));
+            dom.wait_for_work().await;
+            dom.render_immediate(&mut dioxus_core::NoOpMutations);
+            println!("Flushed {}", i);
         }
+        BROADCAST.with(|b| b.0.send(()).unwrap());
+        dom.wait_for_work().await;
     };
 
     tokio::select! {
         _ = fut => {}
-        _ = tokio::time::sleep(Duration::from_millis(500)) => {}
+        _ = tokio::time::sleep(Duration::from_millis(500)) => {
+            println!("Aborting due to timeout");
+        }
     };
 
-    SEQUENCE.with(|s| assert_eq!(s.borrow().len(), 20));
+    SEQUENCE.with(|s| {
+        let s = s.borrow();
+        println!("{:?}", s);
+        assert_eq!(s.len(), 30);
+        // We need to check that each group of three elements looks like [0, 1, 2] or [0, 2, 1]
+        let mut has_seen_1 = false;
+        for (i, &x) in s.iter().enumerate() {
+            let stage = i % 3;
+            if stage == 0 {
+                assert_eq!(x, 0);
+            } else if stage == 1 {
+                assert!(x == 1 || x == 2);
+                has_seen_1 = x == 1;
+            } else if stage == 2 {
+                if has_seen_1 {
+                    assert_eq!(x, 2);
+                } else {
+                    assert_eq!(x, 1);
+                }
+            }
+        }
+    });
 }

+ 3 - 3
packages/hooks/src/use_effect.rs

@@ -28,14 +28,14 @@ pub fn use_effect(mut callback: impl FnMut() + 'static) {
         spawn(async move {
             let rc = ReactiveContext::new_with_origin(location);
             loop {
-                // Wait for the dom the be finished with sync work
-                // flush_sync().await;
-
                 // Run the effect
                 rc.run_in(&mut callback);
 
                 // Wait for context to change
                 rc.changed().await;
+
+                // Wait for the dom the be finished with sync work
+                // Wait for the dom to be finished with sync work
             }
         });
     });

+ 3 - 7
packages/hooks/src/use_future.rs

@@ -1,16 +1,13 @@
 #![allow(missing_docs)]
 use crate::{use_callback, use_hook_did_run, use_signal, UseCallback};
-use dioxus_core::{
-    prelude::{flush_sync, spawn, use_hook},
-    Task,
-};
+use dioxus_core::{prelude::*, Task};
 use dioxus_signals::*;
 use dioxus_signals::{Readable, Writable};
 use std::future::Future;
 
 /// A hook that allows you to spawn a future.
 /// This future will **not** run on the server
-/// The future is spawned on the next call to `flush_sync` which means that it will not run on the server.
+/// The future is spawned on the next call to `wait_for_next_render` which means that it will not run on the server.
 /// To run a future on the server, you should use `spawn` directly.
 /// `use_future` **won't return a value**.
 /// If you want to return a value from a future, use `use_resource` instead.
@@ -45,14 +42,13 @@ where
     let mut callback = use_callback(move || {
         let fut = future();
         spawn(async move {
-            flush_sync().await;
             state.set(UseFutureState::Pending);
             fut.await;
             state.set(UseFutureState::Ready);
         })
     });
 
-    // Create the task inside a copyvalue so we can reset it in-place later
+    // Create the task inside a CopyValue so we can reset it in-place later
     let task = use_hook(|| CopyValue::new(callback.call()));
 
     // Early returns in dioxus have consequences for use_memo, use_resource, and use_future, etc

+ 1 - 5
packages/hooks/src/use_memo.rs

@@ -48,7 +48,7 @@ pub fn use_maybe_sync_memo<R: PartialEq, S: Storage<SignalData<R>>>(
     mut f: impl FnMut() -> R + 'static,
 ) -> ReadOnlySignal<R, S> {
     use_hook(|| {
-        // Get the current reactive context
+        // Create a new reactive context for the memo
         let rc = ReactiveContext::new();
 
         // Create a new signal in that context, wiring up its dependencies and subscribers
@@ -56,8 +56,6 @@ pub fn use_maybe_sync_memo<R: PartialEq, S: Storage<SignalData<R>>>(
 
         spawn(async move {
             loop {
-                // Wait for the dom the be finished with sync work
-                flush_sync().await;
                 rc.changed().await;
                 let new = rc.run_in(&mut f);
                 if new != *state.peek() {
@@ -137,8 +135,6 @@ where
 
         spawn(async move {
             loop {
-                // Wait for the dom the be finished with sync work
-                flush_sync().await;
                 rc.changed().await;
 
                 let new = rc.run_in(|| f(dependencies_signal.read().clone()));

+ 1 - 2
packages/signals/src/signal.rs

@@ -3,7 +3,7 @@ use crate::{
     ReadOnlySignal, ReadableRef,
 };
 use dioxus_core::{
-    prelude::{flush_sync, spawn, IntoAttributeValue},
+    prelude::{spawn, IntoAttributeValue},
     ScopeId,
 };
 use generational_box::{AnyStorage, Storage, SyncStorage, UnsyncStorage};
@@ -107,7 +107,6 @@ impl<T: PartialEq + 'static> Signal<T> {
 
         spawn(async move {
             loop {
-                flush_sync().await;
                 rc.changed().await;
                 let new = f();
                 if new != *state.peek() {