From mboxrd@z Thu Jan 1 00:00:00 1970
From: David Vrabel <david.vrabel@citrix.com>
Subject: [PATCHv3 2/6] evtchn: defer freeing struct evtchn's until evtchn_destroy_final()
Date: Wed, 17 Jun 2015 13:02:59 +0100
Message-ID: <1434542583-28073-3-git-send-email-david.vrabel@citrix.com>
References: <1434542583-28073-1-git-send-email-david.vrabel@citrix.com>
In-Reply-To: <1434542583-28073-1-git-send-email-david.vrabel@citrix.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xenproject.org
Cc: Keir Fraser, Tim Deegan, David Vrabel, Jan Beulich, Ian Campbell
List-Id: xen-devel@lists.xenproject.org

notify_via_xen_event_channel() and free_xen_event_channel() had to
check if the domain was dying because they may be called while the
domain is being destroyed and the struct evtchn's are being freed.

By deferring the freeing of the struct evtchn's until all references
to the domain are dropped, these functions can rely on the channel
state being present and valid.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
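Note (illustration only, not part of the commit): below is a minimal,
self-contained C model of the two-phase teardown this patch moves to.
Every name in it -- domain_destroy(), domain_put(), notify(), the toy
struct domain -- is a hypothetical stand-in, not the hypervisor API;
it only shows why, once freeing is deferred until the last reference
to the domain is dropped, callers running between the two phases can
still safely dereference channel state, so the is_dying checks in the
notify/free paths become unnecessary.

/* sketch.c: illustrative only -- hypothetical names, not Xen code. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum chn_state { ECS_FREE, ECS_INTERDOMAIN, ECS_CLOSED };

struct evtchn {
    enum chn_state state;
};

struct domain {
    int refcnt;          /* references held on this domain */
    int is_dying;        /* kept to mirror the old code; no longer
                          * consulted by notify() below */
    struct evtchn *chn;  /* one flat "bucket" of channels, for brevity */
    int nr_chn;
};

/* Phase 1 (evtchn_destroy() analogue): close every channel but keep
 * the struct evtchn memory allocated. */
static void domain_destroy(struct domain *d)
{
    d->is_dying = 1;
    for (int i = 0; i < d->nr_chn; i++)
        d->chn[i].state = ECS_CLOSED;
}

/* notify_via_xen_event_channel() analogue: safe to run between the
 * two phases because the channel state is still present and valid. */
static void notify(struct domain *d, int port)
{
    assert(port >= 0 && port < d->nr_chn);  /* port_is_valid() analogue */
    if (d->chn[port].state == ECS_INTERDOMAIN)
        printf("event delivered on port %d\n", port);
    /* A closed channel is simply a no-op, never a use-after-free. */
}

/* Phase 2 (evtchn_destroy_final() analogue): free the memory only
 * when the last reference to the domain goes away. */
static void domain_put(struct domain *d)
{
    if (--d->refcnt == 0) {
        free(d->chn);
        free(d);
    }
}

int main(void)
{
    struct domain *d = calloc(1, sizeof(*d));
    d->refcnt = 2;                 /* creator + one other subsystem */
    d->nr_chn = 4;
    d->chn = calloc(d->nr_chn, sizeof(*d->chn));
    d->chn[1].state = ECS_INTERDOMAIN;

    notify(d, 1);       /* delivered */
    domain_destroy(d);  /* phase 1: channels closed, structs kept */
    notify(d, 1);       /* no-op, but the dereference stays safe */
    domain_put(d);      /* one holder drops its reference */
    domain_put(d);      /* last reference: phase 2 frees the memory */
    return 0;
}

The sketch compiles standalone (e.g. gcc -Wall -o sketch sketch.c);
it prints one delivery before teardown and nothing after, with no
use-after-free in between.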
 xen/common/event_channel.c | 48 ++++++++++++------------------------------------
 1 file changed, 12 insertions(+), 36 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 90e3121..ab3b48e 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1175,22 +1175,6 @@ int alloc_unbound_xen_event_channel(
 
 void free_xen_event_channel(struct domain *d, int port)
 {
-    struct evtchn *chn;
-
-    spin_lock(&d->event_lock);
-
-    if ( unlikely(d->is_dying) )
-    {
-        spin_unlock(&d->event_lock);
-        return;
-    }
-
-    BUG_ON(!port_is_valid(d, port));
-    chn = evtchn_from_port(d, port);
-    BUG_ON(!consumer_is_xen(chn));
-
-    spin_unlock(&d->event_lock);
-
     (void)__evtchn_close(d, port);
 }
 
@@ -1202,18 +1186,12 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
 
     spin_lock(&ld->event_lock);
 
-    if ( unlikely(ld->is_dying) )
-    {
-        spin_unlock(&ld->event_lock);
-        return;
-    }
-
     ASSERT(port_is_valid(ld, lport));
     lchn = evtchn_from_port(ld, lport);
-    ASSERT(consumer_is_xen(lchn));
 
     if ( likely(lchn->state == ECS_INTERDOMAIN) )
     {
+        ASSERT(consumer_is_xen(lchn));
         rd = lchn->u.interdomain.remote_dom;
         rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port);
         evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
@@ -1279,7 +1257,7 @@ int evtchn_init(struct domain *d)
 
 void evtchn_destroy(struct domain *d)
 {
-    unsigned int i, j;
+    unsigned int i;
 
     /* After this barrier no new event-channel allocations can occur. */
     BUG_ON(!d->is_dying);
@@ -1289,8 +1267,17 @@ void evtchn_destroy(struct domain *d)
     for ( i = 0; port_is_valid(d, i); i++ )
         (void)__evtchn_close(d, i);
 
+    clear_global_virq_handlers(d);
+
+    evtchn_fifo_destroy(d);
+}
+
+
+void evtchn_destroy_final(struct domain *d)
+{
+    unsigned int i, j;
+
     /* Free all event-channel buckets. */
-    spin_lock(&d->event_lock);
     for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
     {
         if ( !d->evtchn_group[i] )
@@ -1298,20 +1285,9 @@ void evtchn_destroy(struct domain *d)
         for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
             free_evtchn_bucket(d, d->evtchn_group[i][j]);
         xfree(d->evtchn_group[i]);
-        d->evtchn_group[i] = NULL;
     }
 
     free_evtchn_bucket(d, d->evtchn);
-    d->evtchn = NULL;
-    spin_unlock(&d->event_lock);
-
-    clear_global_virq_handlers(d);
-
-    evtchn_fifo_destroy(d);
-}
-
-void evtchn_destroy_final(struct domain *d)
-{
 #if MAX_VIRT_CPUS > BITS_PER_LONG
     xfree(d->poll_mask);
     d->poll_mask = NULL;
-- 
1.7.10.4