patch-2.4.25 linux-2.4.25/arch/ppc64/kernel/process.c

Next file: linux-2.4.25/arch/ppc64/kernel/prom.c
Previous file: linux-2.4.25/arch/ppc64/kernel/proc_pmc.c
Back to the patch index
Back to the overall index

diff -urN linux-2.4.24/arch/ppc64/kernel/process.c linux-2.4.25/arch/ppc64/kernel/process.c
@@ -10,6 +10,9 @@
  *  PowerPC version 
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
+ *  VMX/Altivec port from ppc32 (c) IBM 2003
+ *   Denis Joseph Barrow (dj@de.ibm.com,barrow_dj@yahoo.com)
+ *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
  *  as published by the Free Software Foundation; either version
@@ -46,7 +49,10 @@
 
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs);
 
+#ifndef CONFIG_SMP
 struct task_struct *last_task_used_math = NULL;
+struct task_struct *last_task_used_altivec = NULL;
+#endif /* CONFIG_SMP */
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS;
@@ -93,6 +99,32 @@
 	return 1;
 }
 
+#ifdef CONFIG_ALTIVEC
+int
+dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	return 1;
+}
+
+
+void
+enable_kernel_altivec(void)
+{
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+		giveup_altivec(current);
+	else
+		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
+#else
+	giveup_altivec(last_task_used_altivec);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_ALTIVEC */
+
+
 void
 _switch_to(struct task_struct *prev, struct task_struct *new,
 	  struct task_struct **last)
@@ -121,7 +153,21 @@
 	 */
 	if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
 		giveup_fpu(prev);
-
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * If the previous thread used altivec in the last quantum
+	 * (thus changing altivec regs) then save them.
+	 * We used to check the VRSAVE register but not all apps
+	 * set it, so we don't rely on it now (and in fact we need
+	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
+	 *
+	 * On SMP we always save/restore altivec regs just to avoid the
+	 * complexity of changing processors.
+	 *  -- Cort
+	 */
+	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
+		giveup_altivec(prev);
+#endif /* CONFIG_ALTIVEC */
 	/* prev->last_processor = prev->processor; */
 	current_set[smp_processor_id()].task = new;
 #endif /* CONFIG_SMP */
@@ -145,8 +191,11 @@
 	printk("TASK = %p[%d] '%s' ",
 	       current, current->pid, current->comm);
 	printk("Last syscall: %ld ", current->thread.last_syscall);
-	printk("\nlast math %p ", last_task_used_math);
-	
+#ifndef CONFIG_SMP
+	printk("\nlast math %p last altivec %p", last_task_used_math,
+	       last_task_used_altivec);
+#endif
+
 #ifdef CONFIG_SMP
 	/* printk(" CPU: %d last CPU: %d", current->processor,current->last_processor); */
 #endif /* CONFIG_SMP */
@@ -173,14 +222,22 @@
 
 void exit_thread(void)
 {
+#ifndef CONFIG_SMP
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif
 }
 
 void flush_thread(void)
 {
+#ifndef CONFIG_SMP
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif
 }
 
 void
@@ -225,7 +282,6 @@
 		/* Stack is in kernel space - must adjust */
 		childregs->gpr[1] = (unsigned long)(childregs + 1);
 		*((unsigned long *) childregs->gpr[1]) = 0;
-		childregs->gpr[13] = (unsigned long) p;
 	} else {
 		/* Provided stack is in user space */
 		childregs->gpr[1] = usp;
@@ -244,6 +300,18 @@
 	p->thread.fpscr = current->thread.fpscr;
 	p->thread.fpexc_mode = current->thread.fpexc_mode;
 
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * copy altiVec info - assume lazy altiVec switch
+	 * - kumar
+	 */
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
+	p->thread.vscr = current->thread.vscr;
+	childregs->msr &= ~MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+
 	return 0;
 }
 
@@ -275,9 +343,19 @@
 	regs->gpr[1] = sp;
 	regs->gpr[2] = toc;
 	regs->msr = MSR_USER64;
+#ifndef CONFIG_SMP
 	if (last_task_used_math == current)
 		last_task_used_math = 0;
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = 0;
+#endif /* CONFIG_SMP */
+	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+	memset(&current->thread.vr[0], 0,offsetof(struct thread_struct,vrsave[2])-
+	       offsetof(struct thread_struct,vr[0]));
+	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+#endif /* CONFIG_ALTIVEC */
 }
 
 # define PR_FP_EXC_DISABLED     0       /* FP exceptions disabled */
@@ -337,7 +415,10 @@
 		goto out;
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
-  
+#ifdef CONFIG_ALTIVEC
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
 	error = do_execve(filename, (char **) a1, (char **) a2, regs);
   
 	if (error == 0)

FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)