view psgc-bind-procs.patch @ 1:cef23de210a4

Bunch of fixes to get Zero to build.
author Roman Kennke <rkennke@redhat.com>
date Mon, 30 Jul 2012 13:57:49 +0200
parents
children
line wrap: on
line source

# HG changeset patch
# Parent 144e4e0a3b16431210ba8c49030e33129803e7ef
diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -4356,14 +4356,108 @@
   }
 }
 
+// TODO: This is almost 100% copied from os_solaris.cpp,
+// it can and should be extracted into a shared method.
+static bool assign_distribution(uint* id_array,
+                                uint  id_length,
+                                uint* distribution,
+                                uint  distribution_length) {
+  // Quick check to see if we won't succeed.
+  if (id_length < distribution_length) {
+    return false;
+  }
+  // Assign processor ids to the distribution.
+  // Try to shuffle processors to distribute work across boards,
+  // assuming 4 processors per board.
+  const uint processors_per_board = ProcessDistributionStride;
+  // Find the maximum processor id.
+  uint max_id = 0;
+  for (uint m = 0; m < id_length; m += 1) {
+    max_id = MAX2(max_id, id_array[m]);
+  }
+  // The next id, to limit loops.
+  const uint limit_id = max_id + 1;
+  // Make up markers for available processors.
+  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
+  for (uint c = 0; c < limit_id; c += 1) {
+    available_id[c] = false;
+  }
+  for (uint a = 0; a < id_length; a += 1) {
+    available_id[id_array[a]] = true;
+  }
+  // Step by "boards", then by "slot", copying to "assigned".
+  // NEEDS_CLEANUP: The assignment of processors should be stateful,
+  //                remembering which processors have been assigned by
+  //                previous calls, etc., so as to distribute several
+  //                independent calls of this method.  It would be nice
+  //                to have an API that let us ask how many processes
+  //                are bound to a processor, but we don't have that,
+  //                either.
+  //                In the short term, "board" is static so that
+  //                subsequent distributions don't all start at board 0.
+  static uint board = 0;
+  uint assigned = 0;
+  // Until we've found enough processors ....
+  while (assigned < distribution_length) {
+    // ... find the next available processor in the board.
+    for (uint slot = 0; slot < processors_per_board; slot += 1) {
+      uint try_id = board * processors_per_board + slot;
+      if ((try_id < limit_id) && (available_id[try_id] == true)) {
+        distribution[assigned] = try_id;
+        available_id[try_id] = false;
+        assigned += 1;
+        break;
+      }
+    }
+    board += 1;
+    if (board * processors_per_board + 0 >= limit_id) {
+      board = 0;
+    }
+  }
+  if (available_id != NULL) {
+    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
+  }
+  return true;
+}
+
 bool os::distribute_processes(uint length, uint* distribution) {
-  // Not yet implemented.
+
+  // Fill 'distribution' with 'length' processor ids drawn from this
+  // thread's affinity mask; false if the mask cannot be queried.
+  cpu_set_t cpu_set;
+
+  int result = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_set);
+
+  if (result == 0) {
+
+    uint cpu_ids_found = 0;
+    uint cpu_ids_length = CPU_COUNT(&cpu_set);
+    uint* cpu_ids = NEW_C_HEAP_ARRAY(uint, cpu_ids_length, mtInternal);
+
+    // Collect the id of every processor present in the affinity mask.
+    for (uint i = 0; i < CPU_SETSIZE && cpu_ids_found < cpu_ids_length; i++) {
+      if (CPU_ISSET(i, &cpu_set)) {
+        cpu_ids[cpu_ids_found] = i;
+        cpu_ids_found++;
+      }
+    }
+    // Named 'ok' so it does not shadow the int 'result' above.
+    bool ok = assign_distribution(cpu_ids, cpu_ids_length, distribution, length);
+
+    FREE_C_HEAP_ARRAY(uint, cpu_ids, mtInternal);
+
+    return ok;
+  }
+  return false;
 }
 
 bool os::bind_to_processor(uint processor_id) {
-  // Not yet implemented.
-  return false;
+  cpu_set_t cpu_set;
+  CPU_ZERO(&cpu_set);
+  CPU_SET(processor_id, &cpu_set);
+  // pthread_setaffinity_np returns 0 on success and an error number on
+  // failure, so compare against 0 instead of returning the raw code.
+  return pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set) == 0;
 }
 
 ///