[Kos-cvs] kos/modules/test Makefile, 1.9, 1.10 _test.h, 1.2, 1.3 mutex_test.c, NONE, 1.1 sem_test.c, 1.5, 1.6 sleep_test.c, 1.1, 1.2 test.c, 1.4, 1.5 vmm_test.c, 1.3, 1.4

thomas at kos.enix.org
Wed Dec 29 20:28:53 CET 2004


Update of /home/kos/cvs/kos/modules/test
In directory the-doors:/tmp/cvs-serv1745/modules/test

Modified Files:
	Makefile _test.h sem_test.c sleep_test.c test.c vmm_test.c 
Added Files:
	mutex_test.c 
Log Message:
2004-12-29  Thomas Petazzoni  <thomas at crazy.kos.nx>

	* modules/x86/task/_thread_cpu_context.c
	(init_user_thread_context): VMM functions now work with address
	spaces.

	* modules/x86/mm/_vmap.c (arch_do_unmap_virtual_page): Unmapping a
	page that has no corresponding PT is not an error. The
	ASSERT_FATAL() was therefore removed and replaced by a test that
	returns SUCCESS when no PT is associated with the page (a sketch
	of the new behaviour follows the log message).

	* modules/vmm/vmm.h: VMM functions now operate on address spaces,
	not on teams.

	* modules/vmm/vmm.c: Try to limit the exports only to the modules
	that really need them.

	* modules/vmm/_vmm_map.c: VMM functions now operate on address
	spaces, not on teams.

	* modules/vmm/_vmm_as.c: VMM functions now operate on address
	spaces, not on teams. The return values of kmutex_lock() and
	kmutex_unlock() are now checked for safety (see the locking
	sketch after the log message).

	* modules/vmm/_vmm.h: VMM functions now operate on address spaces,
	not on teams.

	* modules/test/vmm_test.c: A couple of updates and some whitespace
	cleanup, in the hope that the stress test will pass one day, maybe
	;-)

	* modules/test/test.c: Add the new mutex test.

	* modules/test/sem_test.c: Semaphore test update. Fewer kernel
	threads are created, less time is spent in usleep(), and the
	return value of create_kernel_thread() is checked to make sure all
	kernel threads are correctly created.

	* modules/test/mutex_test.c: New mutex test.

	* modules/test/_test.h: New mutex_test() function.

	* modules/test/Makefile (OBJS): New mutex_test.c file.

	* modules/task/_task_kstack.c (unallocate_cpl0_stack): Update
	calls to unmap_virtual_range() according to the new prototype.

	* modules/pci/_pci.c (_pci_scan_bus): Not initializing pca
	(pci_config_address) to 0 is not good! PCI devices are now
	correctly detected under Bochs and Qemu; even a network adapter,
	along with its I/O space and IRQ configuration, can be detected
	(a sketch of the fix follows the log message).

	* modules/lib/std/stdlib.h (printk): printk is now redefined as
	ktty_printk instead of bochs_printk. By the way, I find this
	#define quite ugly.

	* modules/kos/wolfgang.c (primary_thread): Testing, testing,
	debugging, debugging, testing...

	* modules/kitc/_kmutex.c: Changed k_ui32_t to
	spinlock_flags_t. Added some debugging messages and assertions.

	* MkVars (KOSSYSPATH): The path to the kos-sys CVS module, in
	which the hard disk image is stored. The default value is
	../kos-sys/, but it can be overridden using the .mkvars file.

	* Makefile (qemu): Instead of a hard-coded path to the hard disk
	image, we now use a KOSSYSPATH variable that can be overridden in
	the .mkvars file.

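For illustration, here is a minimal sketch of the _vmap.c change described
above. The helper names (pd_entry_t, va_to_pde(), PDE_PRESENT, vaddr_t) are
assumptions made for the sketch, not the actual KOS paging definitions; only
the control flow (return success instead of asserting when no page table is
present) reflects the log entry.

/* Sketch only: pd_entry_t, va_to_pde() and PDE_PRESENT are hypothetical
 * names standing in for the real KOS paging structures. */
result_t arch_do_unmap_virtual_page_sketch(pd_entry_t *pd, vaddr_t vaddr)
{
  pd_entry_t *pde = va_to_pde(pd, vaddr);

  /* No page table covers this address: nothing to unmap.  This case
   * used to hit an ASSERT_FATAL(); it is now reported as success. */
  if (!(*pde & PDE_PRESENT))
    return ESUCCESS;

  /* ... otherwise clear the PT entry and invalidate the TLB as before ... */
  return ESUCCESS;
}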

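Similarly, a minimal sketch of the kmutex_lock()/kmutex_unlock() return
checking now done in _vmm_as.c. The enclosing function and the as->mutex
field are made up for the example; only the checking pattern is the point.

/* Sketch only: the function and the as->mutex field are illustrative. */
static result_t as_with_lock_sketch(struct address_space *as)
{
  result_t result;

  result = kmutex_lock(&as->mutex);
  if (result != ESUCCESS)
    return result;          /* lock failures are no longer ignored */

  /* ... walk or modify the virtual regions of the address space ... */

  result = kmutex_unlock(&as->mutex);
  if (result != ESUCCESS)
    return result;

  return ESUCCESS;
}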

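Finally, a sketch of the kind of fix made in _pci_scan_bus(): the
pci_config_address value written to the 0xCF8 configuration address port must
start from all-zero bits, otherwise stale stack contents end up in the
reserved and enable bits and the reads from the 0xCFC data port return
garbage. The struct layout and field names below are assumed, not the actual
KOS declaration.

/* Sketch only: struct pci_config_address and its bitfields are assumed,
 * not the real KOS declaration. */
static struct pci_config_address make_config_address(int bus, int dev,
                                                     int fn, int reg)
{
  struct pci_config_address pca = { 0 };  /* the fix: start from zero */

  pca.enable   = 1;                       /* bit 31 of the 0xCF8 register */
  pca.bus      = bus;
  pca.device   = dev;
  pca.function = fn;
  pca.reg      = reg;

  /* _pci_scan_bus() then writes this value to the 0xCF8 address port and
   * reads the device's configuration dword from the 0xCFC data port. */
  return pca;
}
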
Index: vmm_test.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/vmm_test.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -d -r1.3 -r1.4
--- vmm_test.c	28 Dec 2004 18:44:52 -0000	1.3
+++ vmm_test.c	29 Dec 2004 19:28:51 -0000	1.4
@@ -13,7 +13,7 @@
 #include <task/task.h>
 #include <test/_test.h>
 
-//#define VMM_TEST_DEBUG
+#define VMM_TEST_DEBUG
 
 /**
  * Test the split of virtual regions
@@ -367,13 +367,15 @@
 	{
 	  return;
 	}
-      
+
+      __dbg_printk("[Thread start 0x%x] Mapping\n", start);
+
       result = as_map_ures(& get_current_team()->address_space,
 			   NULL, 0, start, REGION_SIZE,
 			   MAP_PRIVATE,
-			   VM_ACCESS_READ | VM_ACCESS_WRITE, 
+			   VM_ACCESS_READ | VM_ACCESS_WRITE,
 			   MAP_ANONYMOUS | MAP_FIXED, & vaddr1);
-      
+
       if(result < 0)
 	{
 	  write_spin_lock(vmm_test_lock, flags);
@@ -381,21 +383,23 @@
 	  write_spin_unlock(vmm_test_lock, flags);
 	  return;
 	}
-      
+
       area = (char *) vaddr1;
-      
+
       for (i = 0; i < REGION_SIZE; i += PAGE_SIZE)
 	{
 	  area[i+12] = 0x42;
 	}
-      
+
+      __dbg_printk("[Thread start 0x%x] Partial unmapping\n", start);
+
       /* Should partially unmap the previous region */
       result = as_map_ures(& get_current_team()->address_space,
 			   NULL, 0, (start + REGION_SIZE/2), REGION_SIZE,
 			   MAP_PRIVATE | MAP_FIXED,
 			   VM_ACCESS_READ | VM_ACCESS_WRITE, 
 			   MAP_ANONYMOUS | MAP_FIXED, & vaddr2);
-      
+
       if(result < 0)
 	{
 	  write_spin_lock(vmm_test_lock, flags);
@@ -403,28 +407,30 @@
 	  write_spin_unlock(vmm_test_lock, flags);
 	  return;
 	}
-      
+
       area = (char*) vaddr2;
-      
+
       for (i = 0; i < REGION_SIZE; i += PAGE_SIZE)
 	{
 	  area[i+12] = 0x42;
-	}  
-      
+	}
+
+      __dbg_printk("[Thread start 0x%x] Total unmapping\n", start);
+
       /* Unmap everything */
       result = as_unmap_ures(& get_current_team()->address_space,
 			     vaddr1, 2*REGION_SIZE);
-      
+
       if(result < 0)
 	{
 	  write_spin_lock(vmm_test_lock, flags);
 	  thread_count = -1;
 	  write_spin_unlock(vmm_test_lock, flags);
 	  return;
-	}  
+	}
 
       iter++;
-    }      
+    }
 
   write_spin_lock(vmm_test_lock, flags);
   thread_count--;
@@ -437,18 +443,18 @@
   int i;
   k_ui32_t flags;
 
-  for (i = 0; i < 10; i++)
+  for (i = 0; i < 2; i++)
     {
       create_kernel_thread(NULL, vmm_stress_test, (void *) (USER_SPACE_START + i*50*PAGE_SIZE));
 
       write_spin_lock(vmm_test_lock, flags);
       thread_count++;
-      write_spin_unlock(vmm_test_lock, flags);      
+      write_spin_unlock(vmm_test_lock, flags);
     }
-  
+
   while(thread_count > 0)
     {
-      usleep(10000000);
+      usleep(100000);
     }
 
   if(thread_count == 0)
@@ -461,7 +467,7 @@
     }
 
   return ESUCCESS;
-  
+
   result = split_test();
   if(result < 0)
     {

Index: _test.h
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/_test.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -d -r1.2 -r1.3
--- _test.h	11 Dec 2003 17:01:27 -0000	1.2
+++ _test.h	29 Dec 2004 19:28:51 -0000	1.3
@@ -16,5 +16,6 @@
 result_t sleep_test(void);
 result_t sem_test(void);
 result_t vmm_test(void);
+result_t mutex_test(void);
 
 #endif /* ___TEST_H__ */

Index: test.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/test.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- test.c	27 Dec 2004 22:01:04 -0000	1.4
+++ test.c	29 Dec 2004 19:28:51 -0000	1.5
@@ -13,9 +13,10 @@
 #include <debug/debug.h>
 #include <test/_test.h>
 
-// #define SEM_TEST
-// #define SLEEP_TEST
-#define VMM_TEST
+//#define SEM_TEST
+//#define SLEEP_TEST
+//#define VMM_TEST
+#define MUTEX_TEST
 
 result_t test_run_all_tests(void)
 {
@@ -42,6 +43,13 @@
     return result;
 #endif
 
+#ifdef MUTEX_TEST
+  __dbg_printk("Mutex test...\n");
+  result = mutex_test();
+  if(result < 0)
+    return result;
+#endif
+
   return ESUCCESS;
 }
 

Index: Makefile
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/Makefile,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -d -r1.9 -r1.10
--- Makefile	11 Dec 2003 17:01:27 -0000	1.9
+++ Makefile	29 Dec 2004 19:28:51 -0000	1.10
@@ -1,4 +1,4 @@
-OBJS=test.o sleep_test.o sem_test.o vmm_test.o
+OBJS=test.o sleep_test.o sem_test.o vmm_test.o mutex_test.o
 
 all: test.ro
 

--- NEW FILE: mutex_test.c ---
/*
 * Copyright (C) 2000, Thomas Petazzoni
 * http://kos.enix.org
 *
 * Mutex function test.
 *
 * @(#) $Id: mutex_test.c,v 1.1 2004/12/29 19:28:51 thomas Exp $
 */

#include <scheduler/scheduler.h>
#include <kitc/kmutex.h>
#include <debug/debug.h>
#include <kmem/kmem.h>
#include <task/task.h>
#include <test/_test.h>

#define NB_THREADS 1
#define NB_ITERATIONS 10000000

static uint thread_count = 0;

static volatile uint value = 0;

static SPINLOCK(mutex_test_global_lock);

static void mutex_inc(void *data)
{
  struct kmutex *m = (struct kmutex *) data;
  int i = 0;
  spinlock_flags_t flags;

  while(i < NB_ITERATIONS)
    {

      CONCEPTION_ASSERT(kmutex_lock(m) == ESUCCESS);

      if(value == 0)
	{
	  value ++;
	}

      ASSERT_FATAL(value == 1);

      CONCEPTION_ASSERT(kmutex_unlock(m) == ESUCCESS);

      i++;
    }

  write_spin_lock(mutex_test_global_lock, flags);
  thread_count --;
  write_spin_unlock(mutex_test_global_lock, flags);
}

static void mutex_dec(void *data)
{
  struct kmutex *m = (struct kmutex *) data;
  int i = 0;
  spinlock_flags_t flags;

  while(i < NB_ITERATIONS)
    {

      CONCEPTION_ASSERT(kmutex_lock(m) == ESUCCESS);

      if(value == 1)
	{
	  value --;
	}

      ASSERT_FATAL(value == 0);

      CONCEPTION_ASSERT(kmutex_unlock(m) == ESUCCESS);

      i++;
    }

  write_spin_lock(mutex_test_global_lock, flags);
  thread_count --;
  write_spin_unlock(mutex_test_global_lock, flags);
}

result_t mutex_test()
{
  int i;
  spinlock_flags_t flags;
  struct kmutex mutex;

  kmutex_init(&mutex, "none", FALSE);

  for (i = 0; i < NB_THREADS; i++)
    {
      struct thread *t;

      t = create_kernel_thread(NULL, mutex_inc, (void *) & mutex);
      ASSERT_FATAL(t != NULL);

      t = create_kernel_thread(NULL, mutex_dec, (void *) & mutex);
      ASSERT_FATAL(t != NULL);

      write_spin_lock(mutex_test_global_lock, flags);
      thread_count += 2;
      write_spin_unlock(mutex_test_global_lock, flags);

    }

#if 0
  for (i = 0; i < TEST_NB_SEMAPHORE; i++)
    {
      struct ksem * ksem = kmalloc(sizeof(struct ksem));

      if(ksem == NULL)
	return -ENOMEM;

      result = ksem_init(ksem, "Sem test", 1);
      if(result < 0)
	return result;

      for (j = 0; j < TEST_NB_CONSUMER_PER_SEMAPHORE; j++)
	{
	  struct args * args = kmalloc(sizeof(struct args));
	  struct thread *t;

	  if(args == NULL)
	    return -ENOMEM;

	  args->sem = ksem;
	  args->id  = j;

	  t = create_kernel_thread(NULL, sem_consumer, (void *) args);
	  if(t == NULL)
	    {
	      FAILED_VERBOSE("Couldn't create sem_consumer thread\n");
	    }

	  write_spin_lock(sem_test_global_lock, flags);
	  thread_count++;
	  write_spin_unlock(sem_test_global_lock, flags);
	}

      for (j = 0; j < TEST_NB_PRODUCER_PER_SEMAPHORE; j++)
	{
	  struct thread *t;

	  t = create_kernel_thread(NULL, sem_producer, (void *) ksem);

	  if(t == NULL)
	    {
	      FAILED_VERBOSE("Couldn't create sem_producer thread\n");
	    }

	  write_spin_lock(sem_test_global_lock, flags);
	  thread_count++;
	  write_spin_unlock(sem_test_global_lock, flags);

	}
    }
#endif

  while(thread_count != 0)
    {
      usleep(1000000);
      __dbg_printk("Current thread_count = %d : ", thread_count);
      __dbg_printk("\n");
    }

  __dbg_printk("\n\nTest Ok\n\n");

  return ESUCCESS;
}

Index: sem_test.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/sem_test.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- sem_test.c	9 Jun 2003 15:29:17 -0000	1.5
+++ sem_test.c	29 Dec 2004 19:28:51 -0000	1.6
@@ -14,8 +14,8 @@
 #include <task/task.h>
 #include <test/_test.h>
 
-#define TEST_NB_SEMAPHORE               10
-#define TEST_NB_CONSUMER_PER_SEMAPHORE  100
+#define TEST_NB_SEMAPHORE               5
+#define TEST_NB_CONSUMER_PER_SEMAPHORE  20
 #define TEST_NB_PRODUCER_PER_SEMAPHORE  TEST_NB_CONSUMER_PER_SEMAPHORE
 #define TEST_DURATION                   100
 
@@ -23,7 +23,7 @@
 
 static SPINLOCK(sem_test_global_lock);
 
-struct args 
+struct args
 {
   ksem_t *sem;
   int id;
@@ -43,7 +43,7 @@
       ksem_down(sem);
       for (j = 0; j < 100; j++)
 	j++; j--;
-      
+
       write_spin_lock(sem_test_global_lock, flags);
       nb_reception[args->id]++;
       write_spin_unlock(sem_test_global_lock, flags);
@@ -63,7 +63,7 @@
   for (i = 0; i < TEST_DURATION; i++)
     {
       ksem_up(sem);
-      usleep(1000000);
+      usleep(10000 * (((unsigned) data) % 7));
     }
 
   write_spin_lock(sem_test_global_lock, flags);
@@ -94,6 +94,7 @@
       for (j = 0; j < TEST_NB_CONSUMER_PER_SEMAPHORE; j++)
 	{
 	  struct args * args = kmalloc(sizeof(struct args));
+	  struct thread *t;
 
 	  if(args == NULL)
 	    return -ENOMEM;
@@ -101,7 +102,11 @@
 	  args->sem = ksem;
 	  args->id  = j;
 
-	  create_kernel_thread(NULL, sem_consumer, (void *) args);
+	  t = create_kernel_thread(NULL, sem_consumer, (void *) args);
+	  if(t == NULL)
+	    {
+	      FAILED_VERBOSE("Couldn't create sem_consumer thread\n");
+	    }
 
 	  write_spin_lock(sem_test_global_lock, flags);
 	  thread_count++;
@@ -110,7 +115,15 @@
 
       for (j = 0; j < TEST_NB_PRODUCER_PER_SEMAPHORE; j++)
 	{
-	  create_kernel_thread(NULL, sem_producer, (void *) ksem);
+	  struct thread *t;
+
+	  t = create_kernel_thread(NULL, sem_producer, (void *) ksem);
+
+	  if(t == NULL)
+	    {
+	      FAILED_VERBOSE("Couldn't create sem_producer thread\n");
+	    }
+
 	  write_spin_lock(sem_test_global_lock, flags);
 	  thread_count++;
 	  write_spin_unlock(sem_test_global_lock, flags);
@@ -120,7 +133,7 @@
 
   while(thread_count != 0)
     {
-      usleep(10000000);
+      usleep(1000000);
       __dbg_printk("Current thread_count = %d : ", thread_count);
       for (i = 0; i < TEST_NB_CONSUMER_PER_SEMAPHORE; i++)
 	__dbg_printk("%d ", nb_reception[i]);

Index: sleep_test.c
===================================================================
RCS file: /home/kos/cvs/kos/modules/test/sleep_test.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -d -r1.1 -r1.2
--- sleep_test.c	2 Jun 2003 18:11:23 -0000	1.1
+++ sleep_test.c	29 Dec 2004 19:28:51 -0000	1.2
@@ -13,7 +13,7 @@
 #include <test/_test.h>
 #include <kos/spinlock.h>
 
-#define TEST_NB_THREADS 3000
+#define TEST_NB_THREADS 100
 #define TEST_DURATION   100
 
 static uint thread_count = 0;
@@ -29,7 +29,7 @@
   /* Loop a bit, a call usleep() a couple of times */
   for (i = 0; i < TEST_DURATION; i++)
     {
-      usleep(delay * 100);
+      usleep(delay * 1000);
     }
 
   /* We're done, decrement thread counter */


