[Kos-cvs] kos/modules/vmm _vmm.h, 1.19, 1.20 _vmm_as.c, 1.25, 1.26 _vmm_map.c, 1.6, 1.7 vmm.c, 1.19, 1.20 vmm.h, 1.27, 1.28

thomas at kos.enix.org
Wed Dec 29 20:28:53 CET 2004


Update of /home/kos/cvs/kos/modules/vmm
In directory the-doors:/tmp/cvs-serv1745/modules/vmm

Modified Files:
	_vmm.h _vmm_as.c _vmm_map.c vmm.c vmm.h 
Log Message:
2004-12-29  Thomas Petazzoni  <thomas at crazy.kos.nx>

	* modules/x86/task/_thread_cpu_context.c
	(init_user_thread_context): VMM functions now work with address
	spaces.

	* modules/x86/mm/_vmap.c (arch_do_unmap_virtual_page): Unmapping a
	page that has no corresponding page table (PT) is not an error,
	so the ASSERT_FATAL() was removed and replaced by a test that
	returns SUCCESS when no PT is associated with the page.
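
	A minimal sketch of the new behaviour (the simplified signature
	and the lookup helper are illustrative placeholders, not the
	actual KOS code):

	    result_t arch_do_unmap_virtual_page(struct mm_context *ctxt,
	                                        vaddr_t vaddr)
	    {
	      /* Hypothetical lookup of the page table covering vaddr. */
	      struct page_table *pt = lookup_pt_for_vaddr(ctxt, vaddr);

	      /* No PT means the page was never mapped: there is nothing
	         to undo, so this is now reported as a success instead of
	         triggering an ASSERT_FATAL(). */
	      if (pt == NULL)
	        return ESUCCESS;

	      /* ... clear the PT entry and flush the TLB as before ... */
	      return ESUCCESS;
	    }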

	* modules/vmm/vmm.h: VMM functions now operate on address spaces,
	not on teams.
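
	Concretely, call sites now pass the team's address space (or
	NULL for the current team's address space) instead of the team
	itself; a before/after sketch with illustrative variable names:

	    /* Before: */
	    map_virtual_page(team, virt, phys, access_rights);

	    /* After: */
	    map_virtual_page(& team->address_space, virt, phys,
	                     access_rights);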

	* modules/vmm/vmm.c: Try to limit the exports only to the modules
	that really need them.
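
	As a rough illustration only (the macro body below is a guess,
	not the actual KOS implementation), a restricted export can be
	thought of as recording, next to the symbol, the one module
	allowed to import it:

	    /* Hypothetical expansion: register the symbol together with
	       the name of the only module allowed to link against it. */
	    #define EXPORT_FUNCTION_RESTRICTED(sym, module) \
	      _register_export(#sym, (vaddr_t) (sym), #module)

	The vmm.c diff below declares one (function, importer) pair per
	module that still needs the symbol, which makes the remaining
	inter-module dependencies explicit.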

	* modules/vmm/_vmm_map.c: VMM functions now operate on address
	spaces, not on teams.

	* modules/vmm/_vmm_as.c: VMM functions now operate on address
	spaces, not on teams. Check the return values of kmutex_lock()
	and kmutex_unlock() for safety.
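
	The pattern now used throughout _vmm_as.c (taken from the diff
	below) is:

	    CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
	    /* ... critical section on the address space ... */
	    CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);

	Note that every early return inside such a critical section must
	unlock first; this commit also adds several missing unlocks on
	the error paths of the page fault handler.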

	* modules/vmm/_vmm.h: VMM functions now operate on address
	spaces, not on teams.

	* modules/test/vmm_test.c: A couple of updates and whitespace
	cleanup, in the hope that the stress test will pass one day,
	maybe ;-)

	* modules/test/test.c: Add the new mutex test.

	* modules/test/sem_test.c: Semaphore test update. Fewer kernel
	threads are created, less time is spent in usleep(), and the
	return value of create_kernel_thread() is checked to make sure
	all kernel threads are correctly created.

	* modules/test/mutex_test.c: New mutex test.

	* modules/test/_test.h: New mutex_test() function.

	* modules/test/Makefile (OBJS): New mutex_test.c file.

	* modules/task/_task_kstack.c (unallocate_cpl0_stack): Update
	calls to unmap_virtual_range() according to the new prototype.

	* modules/pci/_pci.c (_pci_scan_bus): Failing to initialize pca
	(pci_config_address) to 0 broke device detection. PCI devices
	are now correctly detected under Bochs and QEMU; even a network
	adapter, along with its I/O space and IRQ configuration, can be
	detected!
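
	The fix amounts to clearing the configuration address value
	before filling in its fields; a minimal sketch (the type and
	field names are guesses based on this log entry, not the actual
	_pci.c code):

	    pci_config_address_t pca;

	    /* Stale bits in pca corrupted every configuration-space
	       access, so start from all zeroes. */
	    memset(& pca, 0, sizeof(pca));

	    pca.bus      = bus;
	    pca.device   = device;
	    pca.function = function;
	    pca.enable   = 1;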

	* modules/lib/std/stdlib.h (printk): printk is redefined as
	ktty_printk, not bochs_printk. By the way, I find this #define
	quite ugly.
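
	The #define in question, as described by this entry:

	    /* printk now goes to the kernel tty instead of the Bochs
	       debug port. */
	    #define printk ktty_printk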

	* modules/kos/wolfgang.c (primary_thread): Testing, testing,
	debugging, debugging, testing...

	* modules/kitc/_kmutex.c: Changed k_ui32_t to
	spinlock_flags_t. Added some debugging messages and assertions.

	* MkVars (KOSSYSPATH): The path to the kos-sys CVS module, in
	which the hard disk image is stored. The default value is
	../kos-sys/, but it can be overridden using the .mkvars file.

	* Makefile (qemu): Instead of a hard-coded path to the hard disk
	image, we use the KOSSYSPATH variable, which can be overridden
	in the .mkvars file.



Index: vmm.h
===================================================================
RCS file: /home/kos/cvs/kos/modules/vmm/vmm.h,v
retrieving revision 1.27
retrieving revision 1.28
diff -u -d -r1.27 -r1.28
--- vmm.h	28 Dec 2004 18:44:56 -0000	1.27
+++ vmm.h	29 Dec 2004 19:28:51 -0000	1.28
@@ -37,7 +37,7 @@
 {
   enum { PF_PRESENT=0x10, PF_NOT_PRESENT } page_present;
 
-  access_right_t access; 
+  access_right_t access;
 
   enum { PF_ACCESS_USER=0x20, PF_ACCESS_SUPERVISOR } who;
 };
@@ -48,6 +48,10 @@
  * and each address space is owned by EXACTLY 1 team. This structure
  * is only here in order to make a clean boundary between the VMM and
  * Task subsystems.
+ *
+ * This structure must be public because the "struct task" contains an
+ * address space, and not a pointer to it. So gcc has to know the
+ * storage size of an address space.
  */
 struct address_space
 {
@@ -58,7 +62,7 @@
   struct virtual_region  *vr_tree;
 
  /** Architecture-dependent information */
-  struct mm_context *mm_context; 
+  struct mm_context *mm_context;
 
   /** Start of the heap, current position of the heap */
   vaddr_t heap_start, heap_current;
@@ -75,8 +79,12 @@
 /**
  * A virtual region: a contiguous area of virtual memory, with
  * flags indicating its behaviour when a page fault occurs in it.
+ *
+ * This structure must be public because the KARM interface "mapping"
+ * uses virtual regions. Maybe we should replace this public access
+ * with accessors.
  */
-struct virtual_region 
+struct virtual_region
 {
   /** the key is the region start address */
   tree_node_t node;
@@ -116,9 +124,6 @@
 
 };
 
-
-//#include <task/task.h>
-
 struct team;
 
 /* _vmm_as.c */
@@ -153,22 +158,22 @@
 			vaddr_t *heap_current);
 
 /* _vmm_map.c */
-int map_virtual_page(const struct team* dest_team,
+int map_virtual_page(const struct address_space* address_space,
 		     vaddr_t virt, paddr_t phys,
 		     access_right_t access_rights);
-int unmap_virtual_range(const struct team* dest_team,
+int unmap_virtual_range(const struct address_space* address_space,
 			vaddr_t start, size_t len);
-int unmap_virtual_page(const struct team* dest_team,
+int unmap_virtual_page(const struct address_space* address_space,
 		       vaddr_t vaddr);
-int protect_virtual_page(const struct team* dest_team,
+int protect_virtual_page(const struct address_space* address_space,
 			 vaddr_t vaddr,
 			 access_right_t access_rights);
-result_t protect_virtual_range(const struct team *dest_team,
+result_t protect_virtual_range(const struct address_space* address_space,
 			       vaddr_t start, vaddr_t end,
 			       access_right_t access_rights);
-result_t get_paddr_at_vaddr(const struct team *dest_team,
+result_t get_paddr_at_vaddr(const struct address_space* address_space,
 			    vaddr_t virt, paddr_t *paddr);
-result_t get_virtual_page_status(const struct team *team,
+result_t get_virtual_page_status(const struct address_space* address_space,
 				 vaddr_t vaddr, vpage_status_t *status);
 
 #endif

Index: vmm.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/vmm/vmm.c,v
retrieving revision 1.19
retrieving revision 1.20
diff -u -d -r1.19 -r1.20
--- vmm.c	28 Dec 2004 18:44:56 -0000	1.19
+++ vmm.c	29 Dec 2004 19:28:51 -0000	1.20
@@ -21,15 +21,35 @@
 
 DECLARE_INIT_SYMBOL(init_module_level1, INIT_LEVEL1);
 
-EXPORT_FUNCTION(unmap_virtual_page);
-EXPORT_FUNCTION(unmap_virtual_range);
-EXPORT_FUNCTION(map_virtual_page);
-EXPORT_FUNCTION(get_paddr_at_vaddr);
-EXPORT_FUNCTION(as_page_fault);
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_page, init);
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page,   init);
+
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_page, kmem);
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_range, kmem);
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page,   kmem);
+EXPORT_FUNCTION_RESTRICTED(get_paddr_at_vaddr, kmem);
+
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_page, pmm);
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page,   pmm);
+
+/* TODO : Remove this dependency */
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_page, kos);
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page,   kos);
+
+/* TODO : Maybe remove this dependency ? */
+EXPORT_FUNCTION_RESTRICTED(unmap_virtual_range, task);
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page,    task);
+
+/* TODO : Maybe remove this dependency ? */
+EXPORT_FUNCTION_RESTRICTED(map_virtual_page, arch_task);
+
+EXPORT_FUNCTION_RESTRICTED(as_page_fault, arch_mm);
+
 EXPORT_FUNCTION_RESTRICTED (as_init,   task);
 EXPORT_FUNCTION_RESTRICTED (as_switch, task);
 EXPORT_FUNCTION_RESTRICTED (as_copy,   task);
 EXPORT_FUNCTION_RESTRICTED (as_empty,  task);
+
 EXPORT_FUNCTION(as_map_ures);
 EXPORT_FUNCTION(as_unmap_ures);
 EXPORT_FUNCTION(as_dump);

Index: _vmm_map.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/vmm/_vmm_map.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -d -r1.6 -r1.7
--- _vmm_map.c	28 Dec 2004 18:44:56 -0000	1.6
+++ _vmm_map.c	29 Dec 2004 19:28:51 -0000	1.7
@@ -8,8 +8,9 @@
 
 /** Map a physical page into an address space
  *
- * @param dest_team The team in which the page will be mapped, if
- * NULL, current team will be considered as desintation team.
+ * @param address_space The address space in which the page will be
+ * mapped. If NULL, the address space of the current team will be
+ * used as the destination address space.
  *
  * @param virt The virtual address at which the physical page has to
  * be mapped.
@@ -23,7 +24,7 @@
  *
  * @return Error code (@see errno.h)
  */
-result_t map_virtual_page(const struct team* dest_team,
+result_t map_virtual_page(const struct address_space* address_space,
 			  vaddr_t virt, paddr_t phys,
 			  access_right_t access_rights)
 {
@@ -31,8 +32,11 @@
   struct map_session map_session;
   spinlock_flags_t flags;
 
-  DEBUG_PRINT1("[map_virtual_page] Mapping phys 0x%x => virt 0x%x\n",
-	       phys, virt);
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(virt) == virt);
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(phys) == phys);
+
+  DEBUG_PRINT1("[map_virtual_page] Mapping phys 0x%x => virt 0x%x in AS 0x%x\n",
+	       phys, virt, address_space);
 
   result = arch_pre_map_virtual_page(& map_session);
   if(result < 0)
@@ -45,7 +49,7 @@
   write_spin_lock(vmm_spinlock, flags);
 
   result = arch_do_map_virtual_page(& map_session,
-				    (dest_team) ? (dest_team->address_space.mm_context) : NULL,
+				    (address_space) ? (address_space->mm_context) : NULL,
 				    phys, virt, access_rights);
 
   write_spin_unlock(vmm_spinlock, flags);
@@ -69,8 +73,9 @@
 
 /** Change the access rights of a single virtual page
  *
- * @param dest_team The team in which the page is mapped. If NULL,
- * current team will be considered as destination team.
+ * @param address_space The address space containing the page whose
+ * protection rights should be changed. If NULL, the address space
+ * of the current team is used.
  *
  * @param vaddr The virtual address of the page
  *
@@ -81,7 +86,7 @@
  *
  * @result Error code
  */
-result_t protect_virtual_page(const struct team* dest_team,
+result_t protect_virtual_page(const struct address_space* address_space,
 			      vaddr_t vaddr,
 			      access_right_t access_rights)
 {
@@ -89,10 +94,12 @@
   result_t result;
   spinlock_flags_t flags;
 
-  if(dest_team == NULL)
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(vaddr) == vaddr);
+
+  if(address_space == NULL)
     mm_context = NULL;
   else
-    mm_context = dest_team->address_space.mm_context;
+    mm_context = address_space->mm_context;
 
   write_spin_lock(vmm_spinlock, flags);
 
@@ -105,8 +112,9 @@
 
 /** Change the access rights of a range of virtual pages
  *
- * @param dest_team The team in which the range of pages is mapped. If
- * NULL, the current team will be considered as the destination team.
+ * @param address_space The address space containing the pages whose
+ * protection rights should be changed. If NULL, the address space
+ * of the current team is used.
  *
  * @param start Virtual start address of the range
  *
@@ -119,18 +127,17 @@
  *
  * @return Error code
  */
-result_t protect_virtual_range(const struct team *dest_team,
+result_t protect_virtual_range(const struct address_space *address_space,
 			       vaddr_t start, vaddr_t end,
 			       access_right_t access_rights)
 {
-  spinlock_flags_t flags;
   vaddr_t vaddr;
 
   CONCEPTION_ASSERT(PAGE_ALIGN_INF(start) == start);
 
   for (vaddr = start ; vaddr < end ; vaddr += PAGE_SIZE)
     {
-      protect_virtual_page(dest_team, vaddr, access_rights);
+      protect_virtual_page(address_space, vaddr, access_rights);
     }
 
   return ESUCCESS;
@@ -138,9 +145,9 @@
 
 /** Unmap a virtual page
  *
- * @param dest_team The team in which the virtual page to unmap is
- * mapped. If NULL, the current team will be considered as the
- * destination team.
+ * @param address_space The address space containing the page that
+ * should be unmapped. If NULL, the address space of the current
+ * team is used.
  *
  * @param vaddr The virtual address of the page to unmap
  *
@@ -149,7 +156,7 @@
  *
  * @return Error code
  */
-result_t unmap_virtual_page(const struct team* dest_team,
+result_t unmap_virtual_page(const struct address_space *address_space,
 			    vaddr_t vaddr)
 {
   struct map_session map_session;
@@ -158,7 +165,9 @@
   result_t result;
   spinlock_flags_t flags;
 
-  mm_ctxt = ((dest_team) ? (dest_team->address_space.mm_context) : NULL);
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(vaddr) == vaddr);
+
+  mm_ctxt = ((address_space) ? (address_space->mm_context) : NULL);
 
   DEBUG_PRINT1("[vmm/unmap_virtual_page] Unmapping 0x%x (mm_ctxt=0x%x)\n",
 	       vaddr, mm_ctxt);
@@ -205,7 +214,9 @@
 
 /** Unmap a virtual range
  *
- * @param dest_team The destination team
+ * @param address_space The address space containing the pages that
+ * should be unmapped. If NULL, the address space of the current team
+ * is used.
  *
  * @param start The starting address of the area to unmap
  *
@@ -218,14 +229,17 @@
  * @return Error code. If an error is returned, then the range is left
  * partially mapped, partially unmapped.
  */
-result_t unmap_virtual_range(const struct team* dest_team, vaddr_t start, size_t len)
+result_t unmap_virtual_range(const struct address_space *address_space,
+			     vaddr_t start, size_t len)
 {
   result_t result;
   vaddr_t page;
 
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(start) == start);
+
   for ( page = start ; page < (start + len) ; page += PAGE_SIZE)
     {
-      result = unmap_virtual_page(dest_team, page);
+      result = unmap_virtual_page(address_space, page);
       if(result < 0)
 	{
 	  return result;
@@ -235,13 +249,14 @@
   return ESUCCESS;
 }
 
-/** Remap the given virtual range to an other team
+/** Remap the given virtual range to another address space
  *
  * This function remaps all the pages of the virtual range [start ;
- * end] of the <b>current</b> team to the given destination team
- * (dest_team). This function is used in the fork() mechanism.
+ * end] of the <b>current</b> address space to the given destination
+ * address space (address_space). This function is used in the fork()
+ * mechanism.
  *
- * @param dest_team     The destination team
+ * @param address_space The destination address space
  * @param start         Beginning of the virtual range
  * @param end           End of the virtual range
  * @param access_rights Access rights that apply to the remapped range
@@ -251,18 +266,20 @@
  * @todo Detect map_virtual_page errors. Do the calls to arch_* by
  * hand to be able to correctly handle lock problems.
  */
-result_t dup_virtual_range(const struct team *dest_team, vaddr_t start, vaddr_t end,
+result_t _dup_virtual_range(const struct address_space *address_space, vaddr_t start, vaddr_t end,
 			   access_right_t access_rights)
 {
   vaddr_t cur;
 
+  CONCEPTION_ASSERT(PAGE_ALIGN_INF(start) == start);
+
   for (cur = start ; cur < end ; cur += PAGE_SIZE)
     {
       paddr_t paddr;
 
       get_paddr_at_vaddr(NULL, cur, & paddr);
 
-      map_virtual_page(dest_team, cur, paddr, access_rights);
+      map_virtual_page(address_space, cur, paddr, access_rights);
     }
 
   return ESUCCESS;
@@ -270,7 +287,8 @@
 
 /** Get the status of a virtual page (either mapped, swapped or unmapped)
  *
- * @param team The destination team
+ * @param address_space The address space in which the page resides.
+ * If NULL, the address space of the current team is used.
  *
  * @param vaddr The address of the virtual page
  *
@@ -278,17 +296,17 @@
  *
  * @return Error code
  */
-result_t get_virtual_page_status(const struct team *team,
+result_t get_virtual_page_status(const struct address_space *address_space,
 				 vaddr_t vaddr, vpage_status_t *status)
 {
   struct mm_context *mm_context;
   result_t result;
   spinlock_flags_t flags;
 
-  if(team == NULL)
+  if(address_space == NULL)
     mm_context = NULL;
   else
-    mm_context = team->address_space.mm_context;
+    mm_context = address_space->mm_context;
 
   write_spin_lock(vmm_spinlock, flags);
 
@@ -301,7 +319,8 @@
 
 /** Get the physical address of a virtual page
  *
- * @param team Destination team
+ * @param address_space The address space in which the page resides.
+ * If NULL, the address space of the current team is used.
  *
  * @param vaddr Virtual address of the page
  *
@@ -309,16 +328,17 @@
  *
  * @return Error code
  */
-result_t get_paddr_at_vaddr(const struct team *team, vaddr_t vaddr, paddr_t *paddr)
+result_t get_paddr_at_vaddr(const struct address_space *address_space,
+			    vaddr_t vaddr, paddr_t *paddr)
 {
   struct mm_context *mm_context;
   result_t result;
   spinlock_flags_t flags;
 
-  if(team == NULL)
+  if(address_space == NULL)
     mm_context = NULL;
   else
-    mm_context = team->address_space.mm_context;
+    mm_context = address_space->mm_context;
 
   write_spin_lock(vmm_spinlock, flags);
 

Index: _vmm.h
===================================================================
RCS file: /home/kos/cvs/kos/modules/vmm/_vmm.h,v
retrieving revision 1.19
retrieving revision 1.20
diff -u -d -r1.19 -r1.20
--- _vmm.h	28 Dec 2004 18:44:56 -0000	1.19
+++ _vmm.h	29 Dec 2004 19:28:51 -0000	1.20
@@ -7,8 +7,8 @@
 #include <arch/mm/mm.h>
 
 /* _vmm_map.c */
-result_t dup_virtual_range(const struct team *dest_team, vaddr_t start, vaddr_t end,
-			   access_right_t access_rights);
+result_t _dup_virtual_range(const struct address_space *address_space, vaddr_t start, vaddr_t end,
+			    access_right_t access_rights);
 
 __init_text result_t _init_as_engine(void);
 

Index: _vmm_as.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/vmm/_vmm_as.c,v
retrieving revision 1.25
retrieving revision 1.26
diff -u -d -r1.25 -r1.26
--- _vmm_as.c	28 Dec 2004 18:44:56 -0000	1.25
+++ _vmm_as.c	29 Dec 2004 19:28:51 -0000	1.26
@@ -174,15 +174,14 @@
 			    vr->access_right & ~VM_ACCESS_WRITE);
 
       /* Duplicate the virtual => physical references, as read-only */
-      dup_virtual_range(as->owner_team, vr->node.key, vr->end,
-			vr->access_right & ~VM_ACCESS_WRITE);
+      _dup_virtual_range(as, vr->node.key, vr->end,
+			 vr->access_right & ~VM_ACCESS_WRITE);
     }
   else
     {
       /* Duplicate the virtual => physical references, with the same
 	 access rights (MAP_SHARED) */
-      dup_virtual_range(as->owner_team, vr->node.key, vr->end,
-			vr->access_right);
+      _dup_virtual_range(as, vr->node.key, vr->end, vr->access_right);
     }
 
   new_vr->pos_in_ures  = vr->pos_in_ures;
@@ -215,7 +214,7 @@
   as_to = & team_to->address_space;
 
   /* Take the semaphore of the 'from' address space. */
-  kmutex_lock(& as_from->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as_from->mutex) == ESUCCESS);
 
   /* The destination address space should NOT be used by anybody
      else */
@@ -250,8 +249,8 @@
    */
 
   //  Problem with kmutex_trylock, see above
-  //  kmutex_unlock(& as_to->mutex);
-  kmutex_unlock(& as_from->mutex);
+  //  CONCEPTION_ASSERT(kmutex_unlock(& as_to->mutex) == ESUCCESS);
+  CONCEPTION_ASSERT(kmutex_unlock(& as_from->mutex) == ESUCCESS);
 
   return ESUCCESS;
 }
@@ -267,7 +266,7 @@
 {
   struct virtual_region *vr;
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
 
   while ((vr = as->vr_tree) != NULL)
     {
@@ -293,7 +292,7 @@
       kslab_cache_free(vmm_vr_cache, vr);
     }
 
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   return ESUCCESS;
 }
@@ -726,9 +725,9 @@
 
   DEBUG_PRINT2("as_unmap_ures\n");
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
   result = _as_unmap_ures(as, start, size);
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   return result;
 }
@@ -842,7 +841,7 @@
 	     address. Unmap any annoying virtual region */
 	  if(flags & MAP_FIXED)
 	    {
-	      result = as_unmap_ures(as, vaddr_start, size);
+	      result = _as_unmap_ures(as, vaddr_start, size);
 	      if(result < 0)
 		{
 		  return result;
@@ -955,10 +954,10 @@
   DEBUG_PRINT2("as_map_ures\n");
   ASSERT_RETURN_VAL(as, -EINVAL);
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
   result = _as_map_ures(as, ures, pos_in_ures, vaddr_start, size, sharing_type,
 			access_right, flags, out_vaddr);
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   return result;
 }
@@ -991,7 +990,7 @@
   thread = get_current_thread();
   ASSERT_FATAL(thread != NULL);
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
 
   if(vaddr >= (thread->user_stack_addr + PAGE_SIZE)
      && (vaddr <= thread->user_stack_addr + thread->user_stack_size)
@@ -1017,7 +1016,7 @@
       if(vr == NULL)
 	{
 	  DEBUG_PRINT2("Out of any region -> FAULT\n");
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return -EFAULT;
 	}
     }
@@ -1026,6 +1025,7 @@
      ((vr->access_right & VM_ACCESS_WRITE) == 0))
     {
       DEBUG_PRINT2("Write access denied -> FAULT\n");
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return -EFAULT;
     }
 
@@ -1056,6 +1056,7 @@
 			       vr->access_right);
 
 	  DEBUG_PRINT2("COW (without copy) OK\n");
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return ESUCCESS;
 	}
 
@@ -1064,7 +1065,7 @@
       copy_page_vaddr = kvalloc(1, FALSE, FALSE);
       if(copy_page_vaddr == 0)
 	{
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return -ENOMEM;
 	}
 
@@ -1072,7 +1073,7 @@
       if(paddr == 0)
 	{
 	  kvfree(copy_page_vaddr, FALSE);
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return -ENOMEM;
 	}
 
@@ -1094,19 +1095,20 @@
       if(result < 0)
 	{
 	  physmem_put_page(paddr);
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return -ENOMEM;
 	}
 
       /* Done with COW, return */
       DEBUG_PRINT2("COW OK\n");
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return ESUCCESS;
     }
 
   paddr = physmem_get_page(PHYS_PAGE_USER, PHYS_PAGE_SWAPPABLE);
   if (paddr == 0)
     {
-      kmutex_unlock(& as->mutex);
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return -ENOMEM;
     }
 
@@ -1115,7 +1117,7 @@
   if(result < 0)
     {
       physmem_put_page(paddr);
-      kmutex_unlock(& as->mutex);
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return result;
     }
 
@@ -1128,7 +1130,7 @@
 	  DEBUG_PRINT2("Failed to page_in : %d\n", result);
 	  unmap_virtual_page(NULL, vaddr);
 	  physmem_put_page(paddr);
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return result;
 	}
     }
@@ -1139,7 +1141,7 @@
     }
 
   DEBUG_PRINT2("page_in OK\n");
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
   return ESUCCESS;
 }
 
@@ -1229,9 +1231,9 @@
 
   DEBUG_PRINT3 (_B_BLUE "*********** AS DUMP @0x%p ************\n", as);
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
   bst_visit_in_order ( (tree_node_t*) as->vr_tree, (visitor_t) visitor, NULL);
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   DEBUG_PRINT3 ("**********************************\n" _B_NORM);
 }
@@ -1256,9 +1258,9 @@
 
   DEBUG_PRINT2("as_update_heap_start\n");
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
   as->heap_start = as->heap_current = heap_start;
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   return ESUCCESS;
 }
@@ -1288,7 +1290,7 @@
 
   DEBUG_PRINT2("as_change_heap\n");
 
-  kmutex_lock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_lock(& as->mutex) == ESUCCESS);
 
   if (increment != 0)
     {
@@ -1303,7 +1305,7 @@
   if(wanted_heap == 0)
     {
       *heap_current = as->heap_current;
-      kmutex_unlock(& as->mutex);
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return ESUCCESS;
     }
 
@@ -1312,7 +1314,7 @@
   if(wanted_heap < as->heap_start)
     {
       *heap_current = as->heap_current;
-      kmutex_unlock(& as->mutex);
+      CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
       return -EINVAL;
     }
 
@@ -1330,7 +1332,7 @@
       if(result < 0)
 	{
 	  *heap_current = as->heap_current;
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return result;
 	}
 
@@ -1355,7 +1357,7 @@
       if(result < 0)
 	{
 	  *heap_current = as->heap_current;
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return result;
 	}
 
@@ -1375,7 +1377,7 @@
       if(result < 0)
 	{
 	  *heap_current = as->heap_current;
-	  kmutex_unlock(& as->mutex);
+	  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 	  return result;
 	}
 
@@ -1383,7 +1385,7 @@
       *heap_current    = wanted_heap;
     }
 
-  kmutex_unlock(& as->mutex);
+  CONCEPTION_ASSERT(kmutex_unlock(& as->mutex) == ESUCCESS);
 
   return ESUCCESS;
 }


