patch-2.4.0-test9 linux/arch/ppc/kernel/smp.c
- Lines: 455
- Date: Sun Sep 17 09:48:07 2000
- Orig file: v2.4.0-test8/linux/arch/ppc/kernel/smp.c
- Orig date: Tue Sep 5 13:50:02 2000
diff -u --recursive --new-file v2.4.0-test8/linux/arch/ppc/kernel/smp.c linux/arch/ppc/kernel/smp.c
@@ -62,72 +62,62 @@
int start_secondary(void *);
extern int cpu_idle(void *unused);
u_int openpic_read(volatile u_int *addr);
+void smp_call_function_interrupt(void);
+void smp_message_pass(int target, int msg, unsigned long data, int wait);
+/* register for interrupting the primary processor on the powersurge */
+/* N.B. this is actually the ethernet ROM! */
+#define PSURGE_PRI_INTR 0xf3019000
/* register for interrupting the secondary processor on the powersurge */
-#define PSURGE_INTR ((volatile unsigned *)0xf80000c0)
+#define PSURGE_SEC_INTR 0xf80000c0
+/* register for storing the start address for the secondary processor */
+#define PSURGE_START 0xf2800000
+/* virtual addresses for the above */
+volatile u32 *psurge_pri_intr;
+volatile u32 *psurge_sec_intr;
+volatile u32 *psurge_start;
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+#define PPC_MSG_INVALIDATE_TLB 2
+#define PPC_MSG_XMON_BREAK 3
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mtspr(SPRN_TBWU, upper);
+ mtspr(SPRN_TBWL, lower);
+}
void smp_local_timer_interrupt(struct pt_regs * regs)
{
int cpu = smp_processor_id();
- extern void update_one_process(struct task_struct *,unsigned long,
- unsigned long,unsigned long,int);
- if (!--prof_counter[cpu]) {
- int user=0,system=0;
- struct task_struct * p = current;
-
- /*
- * After doing the above, we need to make like
- * a normal interrupt - otherwise timer interrupts
- * ignore the global interrupt lock, which is the
- * WrongThing (tm) to do.
- */
-
- if (user_mode(regs))
- user=1;
- else
- system=1;
-
- if (p->pid) {
- update_one_process(p, 1, user, system, cpu);
-
- p->counter -= 1;
- if (p->counter <= 0) {
- p->counter = 0;
- current->need_resched = 1;
- }
- if (p->nice > 0) {
- kstat.cpu_nice += user;
- kstat.per_cpu_nice[cpu] += user;
- } else {
- kstat.cpu_user += user;
- kstat.per_cpu_user[cpu] += user;
- }
-
- kstat.cpu_system += system;
- kstat.per_cpu_system[cpu] += system;
- }
+ if (!--prof_counter[cpu]) {
+ update_process_times(user_mode(regs));
prof_counter[cpu]=prof_multiplier[cpu];
}
}
-void smp_message_recv(int msg)
+void smp_message_recv(int msg, struct pt_regs *regs)
{
ipi_count++;
- switch( msg )
- {
- case MSG_STOP_CPU:
- __cli();
- while (1) ;
+ switch( msg ) {
+ case PPC_MSG_CALL_FUNCTION:
+ smp_call_function_interrupt();
break;
- case MSG_RESCHEDULE:
+ case PPC_MSG_RESCHEDULE:
current->need_resched = 1;
break;
- case MSG_INVALIDATE_TLB:
+ case PPC_MSG_INVALIDATE_TLB:
_tlbia();
- case 0xf0f0: /* pmac syncing time bases - just return */
break;
+#ifdef CONFIG_XMON
+ case PPC_MSG_XMON_BREAK:
+ xmon(regs);
+ break;
+#endif /* CONFIG_XMON */
default:
printk("SMP %d: smp_message_recv(): unknown msg %d\n",
smp_processor_id(), msg);
@@ -142,25 +132,38 @@
* smp_message[].
*
 * This is because we don't have several IPI's on the PowerSurge even though
- * we do on the chrp. It would be nice to use actual IPI's such as with openpic
- * rather than this.
+ * we do on the chrp. It would be nice to use actual IPI's such as with
+ * openpic rather than this.
* -- Cort
*/
int pmac_smp_message[NR_CPUS];
-void pmac_smp_message_recv(void)
+void pmac_smp_message_recv(struct pt_regs *regs)
{
- int msg = pmac_smp_message[smp_processor_id()];
-
+ int cpu = smp_processor_id();
+ int msg;
+
/* clear interrupt */
- out_be32(PSURGE_INTR, ~0);
-
- /* make sure msg is for us */
- if ( msg == -1 ) return;
+ if (cpu == 1)
+ out_be32(psurge_sec_intr, ~0);
+
+ if (smp_num_cpus < 2)
+ return;
+
+ /* make sure there is a message there */
+ msg = pmac_smp_message[cpu];
+ if (msg == 0)
+ return;
- smp_message_recv(msg);
-
/* reset message */
- pmac_smp_message[smp_processor_id()] = -1;
+ pmac_smp_message[cpu] = 0;
+
+ smp_message_recv(msg - 1, regs);
+}
+
+void
+pmac_primary_intr(int irq, void *d, struct pt_regs *regs)
+{
+ pmac_smp_message_recv(regs);
}
/*
@@ -171,7 +174,7 @@
void smp_send_tlb_invalidate(int cpu)
{
if ( (_get_PVR()>>16) == 8 )
- smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0, 0);
+ smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
}
void smp_send_reschedule(int cpu)
@@ -187,18 +190,135 @@
*/
/* This is only used if `cpu' is running an idle task,
so it will reschedule itself anyway... */
- smp_message_pass(cpu, MSG_RESCHEDULE, 0, 0);
+ smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
+}
+
+#ifdef CONFIG_XMON
+void smp_send_xmon_break(int cpu)
+{
+ smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
+}
+#endif /* CONFIG_XMON */
+
+static void stop_this_cpu(void *dummy)
+{
+ __cli();
+ while (1)
+ ;
}
void smp_send_stop(void)
{
- smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0, 0);
+ smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_num_cpus = 1;
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ * Stolen from the i386 version.
+ */
+static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+
+static volatile struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+} *call_data = NULL;
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler; you may call it from a bottom half handler.
+ */
+{
+ struct call_data_struct data;
+ int ret = -1, cpus = smp_num_cpus-1;
+ int timeout;
+
+ if (!cpus)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ spin_lock_bh(&call_lock);
+ call_data = &data;
+ /* Send a message to all other CPUs and wait for them to respond */
+ smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);
+
+ /* Wait for response */
+ timeout = 1000000;
+ while (atomic_read(&data.started) != cpus) {
+ if (--timeout == 0) {
+ printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
+ smp_processor_id(), atomic_read(&data.started));
+ goto out;
+ }
+ barrier();
+ udelay(1);
+ }
+
+ if (wait) {
+ timeout = 1000000;
+ while (atomic_read(&data.finished) != cpus) {
+ if (--timeout == 0) {
+ printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
+ smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
+ goto out;
+ }
+ barrier();
+ udelay(1);
+ }
+ }
+ ret = 0;
+
+ out:
+ spin_unlock_bh(&call_lock);
+ return ret;
+}
+
+void smp_call_function_interrupt(void)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ (*func)(info);
+ if (wait)
+ atomic_inc(&call_data->finished);
}
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
- int i;
-
if ( !(_machine & (_MACH_Pmac|_MACH_chrp|_MACH_prep|_MACH_gemini)) )
return;
@@ -212,31 +332,29 @@
* the recipient won't know the message was destined
* for it. -- Cort
*/
- for ( i = 0; i <= smp_num_cpus ; i++ )
- pmac_smp_message[i] = -1;
- switch( target )
- {
- case MSG_ALL:
- pmac_smp_message[smp_processor_id()] = msg;
- /* fall through */
- case MSG_ALL_BUT_SELF:
- for ( i = 0 ; i < smp_num_cpus ; i++ )
- if ( i != smp_processor_id () )
- pmac_smp_message[i] = msg;
- break;
- default:
- pmac_smp_message[target] = msg;
- break;
+ if (smp_processor_id() == 0) {
+ /* primary cpu */
+ if (target == 1 || target == MSG_ALL_BUT_SELF
+ || target == MSG_ALL) {
+ pmac_smp_message[1] = msg + 1;
+ /* interrupt secondary processor */
+ out_be32(psurge_sec_intr, ~0);
+ out_be32(psurge_sec_intr, 0);
+ }
+ } else {
+ /* secondary cpu */
+ if (target == 0 || target == MSG_ALL_BUT_SELF
+ || target == MSG_ALL) {
+ pmac_smp_message[0] = msg + 1;
+ /* interrupt primary processor */
+ in_be32(psurge_pri_intr);
+ }
+ }
+ if (target == smp_processor_id() || target == MSG_ALL) {
+ /* sending a message to ourself */
+ /* XXX maybe we shouldn't do this if ints are off */
+ smp_message_recv(msg, NULL);
}
- /* interrupt secondary processor */
- out_be32(PSURGE_INTR, ~0);
- out_be32(PSURGE_INTR, 0);
- /*
- * Assume for now that the secondary doesn't send
- * IPI's -- Cort
- */
- /* interrupt primary */
- /**(volatile unsigned long *)(0xf3019000);*/
break;
case _MACH_chrp:
case _MACH_prep:
@@ -261,7 +379,7 @@
#else /* CONFIG_POWER4 */
/* for now, only do reschedule messages
since we only have one IPI */
- if (msg != MSG_RESCHEDULE)
+ if (msg != PPC_MSG_RESCHEDULE)
break;
for (i = 0; i < smp_num_cpus; ++i) {
if (target == MSG_ALL || target == i
@@ -319,7 +437,10 @@
{
case _MACH_Pmac:
/* assume powersurge board - 2 processors -- Cort */
- cpu_nr = 2;
+ cpu_nr = 2;
+ psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
+ psurge_sec_intr = ioremap(PSURGE_SEC_INTR, 4);
+ psurge_start = ioremap(PSURGE_START, 4);
break;
case _MACH_chrp:
if (OpenPIC)
@@ -370,13 +491,11 @@
{
case _MACH_Pmac:
/* setup entry point of secondary processor */
- *(volatile unsigned long *)(0xf2800000) =
- (unsigned long)__secondary_start_psurge-KERNELBASE;
- eieio();
+ out_be32(psurge_start, __pa(__secondary_start_psurge));
/* interrupt secondary to begin executing code */
- out_be32(PSURGE_INTR, ~0);
+ out_be32(psurge_sec_intr, ~0);
udelay(1);
- out_be32(PSURGE_INTR, 0);
+ out_be32(psurge_sec_intr, 0);
break;
case _MACH_chrp:
*(unsigned long *)KERNELBASE = i;
@@ -399,9 +518,6 @@
if ( cpu_callin_map[i] )
{
printk("Processor %d found.\n", i);
- /* this sync's the decr's -- Cort */
- if ( _machine == _MACH_Pmac )
- set_dec(decrementer_count);
smp_num_cpus++;
} else {
printk("Processor %d is stuck.\n", i);
@@ -415,9 +531,25 @@
{
/* reset the entry point so if we get another intr we won't
* try to startup again */
- *(volatile unsigned long *)(0xf2800000) = 0x100;
- /* send interrupt to other processors to start decr's on all cpus */
- smp_message_pass(1,0xf0f0, 0, 0);
+ out_be32(psurge_start, 0x100);
+ if (request_irq(30, pmac_primary_intr, 0, "primary IPI", 0))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
+ /*
+ * The decrementers of both cpus are frozen at this point
+ * until we give the secondary cpu another interrupt.
+ * We set them both to decrementer_count and then send
+ * the interrupt. This should get the decrementers
+ * synchronized.
+ * -- paulus.
+ */
+ set_dec(tb_ticks_per_jiffy);
+ if ((_get_PVR() >> 16) != 1) {
+ set_tb(0, 0); /* set timebase if not 601 */
+ last_jiffy_stamp(0) = 0;
+ }
+ out_be32(psurge_sec_intr, ~0);
+ udelay(1);
+ out_be32(psurge_sec_intr, 0);
}
}
@@ -447,8 +579,11 @@
void __init smp_callin(void)
{
smp_store_cpu_info(current->processor);
- set_dec(decrementer_count);
-
+ set_dec(tb_ticks_per_jiffy);
+ if (_machine == _MACH_Pmac && (_get_PVR() >> 16) != 1) {
+ set_tb(0, 0); /* set timebase if not 601 */
+ last_jiffy_stamp(current->processor) = 0;
+ }
init_idle();
cpu_callin_map[current->processor] = 1;
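
For illustration only (this sketch is not part of the patch): the smp_call_function() interface added above might be used roughly as follows. The helper names do_flush() and flush_everybody() are invented for this example; the remote action (_tlbia()) is borrowed from the PPC_MSG_INVALIDATE_TLB handler in the patch.

	/* Runs on each of the other CPUs, in interrupt context -- must be
	 * fast and non-blocking, per the comment block above. */
	static void do_flush(void *unused)
	{
		_tlbia();
	}

	void flush_everybody(void)
	{
		/* nonatomic is currently unused; wait=1 blocks until every
		 * other CPU has finished running do_flush().  Must not be
		 * called with interrupts disabled or from a hardware
		 * interrupt handler. */
		if (smp_call_function(do_flush, NULL, 1, 1) != 0)
			printk("flush_everybody: other cpus did not respond\n");

		/* The IPI goes only to the *other* CPUs, so do the local
		 * part here as well. */
		_tlbia();
	}

With wait=0 the call returns as soon as every recipient has incremented data.started, i.e. has copied func and info out of the on-stack call_data structure; that is why smp_call_function_interrupt() notes that the structure may go out of scope unless wait==1.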
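
A worked note on the PowerSurge message encoding used above: because 0 in pmac_smp_message[] means "no message pending", the sender stores msg + 1 and the receiver undoes the offset. For example, when the primary passes PPC_MSG_RESCHEDULE (1) to the secondary, it writes pmac_smp_message[1] = 2 and pulses psurge_sec_intr; the secondary acknowledges the interrupt, reads 2, clears the slot back to 0, and calls smp_message_recv(2 - 1, regs), recovering the original message number.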