--- linux-lowlatency-3.2.0.orig/MAINTAINERS
+++ linux-lowlatency-3.2.0/MAINTAINERS
@@ -4901,6 +4901,13 @@
 F:	include/scsi/osd_*
 F:	fs/exofs/
 
+OVERLAYFS FILESYSTEM
+M:	Miklos Szeredi
+L:	linux-fsdevel@vger.kernel.org
+S:	Supported
+F:	fs/overlayfs/*
+F:	Documentation/filesystems/overlayfs.txt
+
 P54 WIRELESS DRIVER
 M:	Christian Lamparter
 L:	linux-wireless@vger.kernel.org
@@ -6258,7 +6265,7 @@
 
 STABLE BRANCH
 M:	Greg Kroah-Hartman
-L:	stable@kernel.org
+L:	stable@vger.kernel.org
 S:	Maintained
 
 STAGING SUBSYSTEM
--- linux-lowlatency-3.2.0.orig/dropped.txt
+++ linux-lowlatency-3.2.0/dropped.txt
@@ -0,0 +1 @@
+UBUNTU: SAUCE: (no-up) Restore VT fonts on switch
--- linux-lowlatency-3.2.0.orig/MAINTENANCE
+++ linux-lowlatency-3.2.0/MAINTENANCE
@@ -0,0 +1,11 @@
+
+This kernel can be maintained by following the Ubuntu git repository.
+For example:
+
+git remote add precise git://kernel.ubuntu.com/ubuntu/ubuntu-precise.git
+git fetch precise
+git fetch precise master
+git rebase refs/remotes/precise/master
+fakeroot debian/rules startnewrelease insertchanges
+git add -u
+git commit -s -m"low-latency: rebased against ubuntu-precise master"
--- linux-lowlatency-3.2.0.orig/Makefile
+++ linux-lowlatency-3.2.0/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 2
-SUBLEVEL = 0
+SUBLEVEL = 9
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
@@ -354,14 +354,23 @@
 AFLAGS_KERNEL	=
 CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage
 
+# Prefer linux-backports-modules
+ifneq ($(KBUILD_SRC),)
+ifneq ($(shell if test -e $(KBUILD_OUTPUT)/ubuntu-build; then echo yes; fi),yes)
+UBUNTUINCLUDE := -I/usr/src/linux-headers-lbm-$(KERNELRELEASE)
+endif
+endif
 
 # Use LINUXINCLUDE when you must reference the include/ directory.
 # Needed to be compatible with the O= option
-LINUXINCLUDE    := -I$(srctree)/arch/$(hdr-arch)/include \
+LINUXINCLUDE    := $(UBUNTUINCLUDE) -I$(srctree)/arch/$(hdr-arch)/include \
                    -Iarch/$(hdr-arch)/include/generated -Iinclude \
                    $(if $(KBUILD_SRC), -I$(srctree)/include) \
                    -include $(srctree)/include/linux/kconfig.h
 
+# UBUNTU: Include our third party driver stuff too
+LINUXINCLUDE += -Iubuntu/include $(if $(KBUILD_SRC),-I$(srctree)/ubuntu/include)
+
 KBUILD_CPPFLAGS := -D__KERNEL__
 
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
@@ -507,7 +516,7 @@
 
 # Objects we will link into vmlinux / subdirs we need to visit
 init-y		:= init/
-drivers-y	:= drivers/ sound/ firmware/
+drivers-y	:= drivers/ sound/ firmware/ ubuntu/
 net-y		:= net/
 libs-y		:= lib/
 core-y		:= usr/
@@ -1060,6 +1069,7 @@
 		$(error Headers not exportable for the $(SRCARCH) architecture))
 	$(Q)$(MAKE) $(hdr-inst)=include
 	$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/asm $(hdr-dst)
+	$(Q)$(MAKE) $(hdr-inst)=ubuntu/include dst=include oldheaders=
 
 PHONY += headers_check_all
 headers_check_all: headers_install_all
@@ -1069,6 +1079,7 @@
 headers_check: headers_install
 	$(Q)$(MAKE) $(hdr-inst)=include HDRCHECK=1
 	$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/asm $(hdr-dst) HDRCHECK=1
+	$(Q)$(MAKE) $(hdr-inst)=ubuntu/include dst=include oldheaders= HDRCHECK=1
 
 # ---------------------------------------------------------------------------
 # Modules
--- linux-lowlatency-3.2.0.orig/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ linux-lowlatency-3.2.0/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -1,2 +1,8 @@
 #include "../../../arch/x86/lib/memcpy_64.S"
 
+/*
+ * We need to provide note.GNU-stack section, saying that we want
+ * NOT executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
--- linux-lowlatency-3.2.0.orig/tools/perf/util/trace-event-parse.c
+++ linux-lowlatency-3.2.0/tools/perf/util/trace-event-parse.c
@@ -1582,6 +1582,8 @@
 	field = malloc_or_die(sizeof(*field));
 
 	type = process_arg(event, field, &token);
+	while (type == EVENT_OP)
+		type = process_op(event, field, &token);
 	if (test_type_token(type, token, EVENT_DELIM, ","))
 		goto out_free;
 
--- linux-lowlatency-3.2.0.orig/tools/perf/util/evsel.c
+++ linux-lowlatency-3.2.0/tools/perf/util/evsel.c
@@ -390,6 +390,7 @@
 
 	data->cpu = data->pid = data->tid = -1;
 	data->stream_id = data->id = data->time = -1ULL;
+	data->period = 1;
 
 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!sample_id_all)
--- linux-lowlatency-3.2.0.orig/arch/powerpc/Kconfig
+++ linux-lowlatency-3.2.0/arch/powerpc/Kconfig
@@ -952,6 +952,8 @@
 
 source "drivers/Kconfig"
 
+source "ubuntu/Kconfig"
+
 source "fs/Kconfig"
 
 source "arch/powerpc/sysdev/qe_lib/Kconfig"
--- linux-lowlatency-3.2.0.orig/arch/powerpc/kernel/irq.c
+++ linux-lowlatency-3.2.0/arch/powerpc/kernel/irq.c
@@ -164,16 +164,13 @@
 	 */
 	local_paca->hard_enabled = en;
 
-#ifndef CONFIG_BOOKE
-	/* On server, re-trigger the decrementer if it went negative since
-	 * some processors only trigger on edge transitions of the sign bit.
-	 *
-	 * BookE has a level sensitive decrementer (latches in TSR) so we
-	 * don't need that
+	/*
+	 * Trigger the decrementer if we have a pending event. Some processors
+	 * only trigger on edge transitions of the sign bit. We might also
+	 * have disabled interrupts long enough that the decrementer wrapped
+	 * to positive.
 	 */
-	if ((int)mfspr(SPRN_DEC) < 0)
-		mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+	decrementer_check_overflow();
 
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
--- linux-lowlatency-3.2.0.orig/arch/powerpc/kernel/time.c
+++ linux-lowlatency-3.2.0/arch/powerpc/kernel/time.c
@@ -889,6 +889,15 @@
 	       clock->name, clock->mult, clock->shift);
 }
 
+void decrementer_check_overflow(void)
+{
+	u64 now = get_tb_or_rtc();
+	struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
+
+	if (now >= decrementer->next_tb)
+		set_dec(1);
+}
+
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
--- linux-lowlatency-3.2.0.orig/arch/powerpc/kernel/perf_event.c
+++ linux-lowlatency-3.2.0/arch/powerpc/kernel/perf_event.c
@@ -865,6 +865,7 @@
 {
 	unsigned long flags;
 	s64 left;
+	unsigned long val;
 
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
@@ -880,7 +881,12 @@
 	event->hw.state = 0;
 	left = local64_read(&event->hw.period_left);
-	write_pmc(event->hw.idx, left);
+
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+
+	write_pmc(event->hw.idx, val);
 
 	perf_event_update_userpage(event);
 
 	perf_pmu_enable(event->pmu);
--- linux-lowlatency-3.2.0.orig/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ linux-lowlatency-3.2.0/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -109,7 +109,7 @@
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &get_cpu_var(hcall_stats)[opcode / 4];
+	h = &__get_cpu_var(hcall_stats)[opcode / 4];
 	h->tb_start = mftb();
 	h->purr_start = mfspr(SPRN_PURR);
 }
@@ -126,8 +126,6 @@
 	h->num_calls++;
 	h->tb_total += mftb() - h->tb_start;
 	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
-
-	put_cpu_var(hcall_stats);
 }
 
 static int __init hcall_inst_init(void)
--- linux-lowlatency-3.2.0.orig/arch/powerpc/platforms/pseries/lpar.c
+++ linux-lowlatency-3.2.0/arch/powerpc/platforms/pseries/lpar.c
@@ -554,6 +554,7 @@
 		goto out;
 
 	(*depth)++;
+	preempt_disable();
 	trace_hcall_entry(opcode, args);
 	(*depth)--;
 
@@ -576,6 +577,7 @@
 
 	(*depth)++;
 	trace_hcall_exit(opcode, retval, retbuf);
+	preempt_enable();
 	(*depth)--;
 
 out:
--- linux-lowlatency-3.2.0.orig/arch/powerpc/include/asm/time.h
+++ linux-lowlatency-3.2.0/arch/powerpc/include/asm/time.h
@@ -219,5 +219,7 @@
 extern void secondary_cpu_time_init(void);
 extern void iSeries_time_init_early(void);
 
+extern void decrementer_check_overflow(void);
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_TIME_H */
--- linux-lowlatency-3.2.0.orig/arch/arm/Kconfig
+++ linux-lowlatency-3.2.0/arch/arm/Kconfig
@@ -2002,7 +2002,7 @@
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.
It is like a reboot @@ -2238,6 +2238,8 @@ source "drivers/Kconfig" +source "ubuntu/Kconfig" + source "fs/Kconfig" source "arch/arm/Kconfig.debug" --- linux-lowlatency-3.2.0.orig/arch/arm/plat-mxc/system.c +++ linux-lowlatency-3.2.0/arch/arm/plat-mxc/system.c @@ -71,7 +71,7 @@ mdelay(50); /* we'll take a jump through zero as a poor second */ - cpu_reset(0); + soft_restart(0); } void mxc_arch_reset_init(void __iomem *base) --- linux-lowlatency-3.2.0.orig/arch/arm/mach-clps711x/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-clps711x/include/mach/system.h @@ -34,7 +34,7 @@ static inline void arch_reset(char mode, const char *cmd) { - cpu_reset(0); + soft_restart(0); } #endif --- linux-lowlatency-3.2.0.orig/arch/arm/mach-pxa/reset.c +++ linux-lowlatency-3.2.0/arch/arm/mach-pxa/reset.c @@ -88,7 +88,7 @@ switch (mode) { case 's': /* Jump into ROM at address 0 */ - cpu_reset(0); + soft_restart(0); break; case 'g': do_gpio_reset(); --- linux-lowlatency-3.2.0.orig/arch/arm/plat-omap/include/plat/io.h +++ linux-lowlatency-3.2.0/arch/arm/plat-omap/include/plat/io.h @@ -257,74 +257,6 @@ extern void omap_writel(u32 v, u32 pa); struct omap_sdrc_params; - -#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) -void omap7xx_map_io(void); -#else -static inline void omap_map_io(void) -{ -} -#endif - -#ifdef CONFIG_ARCH_OMAP15XX -void omap15xx_map_io(void); -#else -static inline void omap15xx_map_io(void) -{ -} -#endif - -#ifdef CONFIG_ARCH_OMAP16XX -void omap16xx_map_io(void); -#else -static inline void omap16xx_map_io(void) -{ -} -#endif - -void omap1_init_early(void); - -#ifdef CONFIG_SOC_OMAP2420 -extern void omap242x_map_common_io(void); -#else -static inline void omap242x_map_common_io(void) -{ -} -#endif - -#ifdef CONFIG_SOC_OMAP2430 -extern void omap243x_map_common_io(void); -#else -static inline void omap243x_map_common_io(void) -{ -} -#endif - -#ifdef CONFIG_ARCH_OMAP3 -extern void omap34xx_map_common_io(void); -#else -static inline void omap34xx_map_common_io(void) -{ -} -#endif - -#ifdef CONFIG_SOC_OMAPTI816X -extern void omapti816x_map_common_io(void); -#else -static inline void omapti816x_map_common_io(void) -{ -} -#endif - -#ifdef CONFIG_ARCH_OMAP4 -extern void omap44xx_map_common_io(void); -#else -static inline void omap44xx_map_common_io(void) -{ -} -#endif - -extern void omap2_init_common_infrastructure(void); extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, struct omap_sdrc_params *sdrc_cs1); --- linux-lowlatency-3.2.0.orig/arch/arm/plat-omap/include/plat/irqs.h +++ linux-lowlatency-3.2.0/arch/arm/plat-omap/include/plat/irqs.h @@ -438,16 +438,6 @@ #ifndef __ASSEMBLY__ extern void __iomem *omap_irq_base; -void omap1_init_irq(void); -void omap2_init_irq(void); -void omap3_init_irq(void); -void ti816x_init_irq(void); -extern int omap_irq_pending(void); -void omap_intc_save_context(void); -void omap_intc_restore_context(void); -void omap3_intc_suspend(void); -void omap3_intc_prepare_idle(void); -void omap3_intc_resume_idle(void); #endif #include --- linux-lowlatency-3.2.0.orig/arch/arm/plat-omap/include/plat/system.h +++ linux-lowlatency-3.2.0/arch/arm/plat-omap/include/plat/system.h @@ -12,6 +12,8 @@ cpu_do_idle(); } -extern void (*arch_reset)(char, const char *); +static inline void arch_reset(char mode, const char *cmd) +{ +} #endif --- linux-lowlatency-3.2.0.orig/arch/arm/plat-omap/include/plat/common.h +++ linux-lowlatency-3.2.0/arch/arm/plat-omap/include/plat/common.h @@ -27,97 +27,16 @@ #ifndef __ARCH_ARM_MACH_OMAP_COMMON_H #define 
__ARCH_ARM_MACH_OMAP_COMMON_H -#include - #include #include -struct sys_timer; - -extern void omap_map_common_io(void); -extern struct sys_timer omap1_timer; -extern struct sys_timer omap2_timer; -extern struct sys_timer omap3_timer; -extern struct sys_timer omap3_secure_timer; -extern struct sys_timer omap4_timer; -extern bool omap_32k_timer_init(void); extern int __init omap_init_clocksource_32k(void); extern unsigned long long notrace omap_32k_sched_clock(void); extern void omap_reserve(void); -void omap2420_init_early(void); -void omap2430_init_early(void); -void omap3430_init_early(void); -void omap35xx_init_early(void); -void omap3630_init_early(void); -void omap3_init_early(void); /* Do not use this one */ -void am35xx_init_early(void); -void ti816x_init_early(void); -void omap4430_init_early(void); - extern int omap_dss_reset(struct omap_hwmod *); void omap_sram_init(void); -/* - * IO bases for various OMAP processors - * Except the tap base, rest all the io bases - * listed are physical addresses. - */ -struct omap_globals { - u32 class; /* OMAP class to detect */ - void __iomem *tap; /* Control module ID code */ - void __iomem *sdrc; /* SDRAM Controller */ - void __iomem *sms; /* SDRAM Memory Scheduler */ - void __iomem *ctrl; /* System Control Module */ - void __iomem *ctrl_pad; /* PAD Control Module */ - void __iomem *prm; /* Power and Reset Management */ - void __iomem *cm; /* Clock Management */ - void __iomem *cm2; -}; - -void omap2_set_globals_242x(void); -void omap2_set_globals_243x(void); -void omap2_set_globals_3xxx(void); -void omap2_set_globals_443x(void); -void omap2_set_globals_ti816x(void); - -/* These get called from omap2_set_globals_xxxx(), do not call these */ -void omap2_set_globals_tap(struct omap_globals *); -void omap2_set_globals_sdrc(struct omap_globals *); -void omap2_set_globals_control(struct omap_globals *); -void omap2_set_globals_prcm(struct omap_globals *); - -void omap242x_map_io(void); -void omap243x_map_io(void); -void omap3_map_io(void); -void omap4_map_io(void); - - -/** - * omap_test_timeout - busy-loop, testing a condition - * @cond: condition to test until it evaluates to true - * @timeout: maximum number of microseconds in the timeout - * @index: loop index (integer) - * - * Loop waiting for @cond to become true or until at least @timeout - * microseconds have passed. To use, define some integer @index in the - * calling code. After running, if @index == @timeout, then the loop has - * timed out. - */ -#define omap_test_timeout(cond, timeout, index) \ -({ \ - for (index = 0; index < timeout; index++) { \ - if (cond) \ - break; \ - udelay(1); \ - } \ -}) - -extern struct device *omap2_get_mpuss_device(void); -extern struct device *omap2_get_iva_device(void); -extern struct device *omap2_get_l3_device(void); -extern struct device *omap4_get_dsp_device(void); - #endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */ --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/head.S +++ linux-lowlatency-3.2.0/arch/arm/kernel/head.S @@ -170,11 +170,11 @@ * Create identity mapping to cater for __enable_mmu. * This identity mapping will be removed by paging_init(). 
*/ - adr r0, __enable_mmu_loc + adr r0, __turn_mmu_on_loc ldmia r0, {r3, r5, r6} sub r0, r0, r3 @ virt->phys offset - add r5, r5, r0 @ phys __enable_mmu - add r6, r6, r0 @ phys __enable_mmu_end + add r5, r5, r0 @ phys __turn_mmu_on + add r6, r6, r0 @ phys __turn_mmu_on_end mov r5, r5, lsr #SECTION_SHIFT mov r6, r6, lsr #SECTION_SHIFT @@ -287,10 +287,10 @@ ENDPROC(__create_page_tables) .ltorg .align -__enable_mmu_loc: +__turn_mmu_on_loc: .long . - .long __enable_mmu - .long __enable_mmu_end + .long __turn_mmu_on + .long __turn_mmu_on_end #if defined(CONFIG_SMP) __CPUINIT @@ -398,15 +398,17 @@ * other registers depend on the function called upon completion */ .align 5 -__turn_mmu_on: + .pushsection .idmap.text, "ax" +ENTRY(__turn_mmu_on) mov r0, r0 mcr p15, 0, r0, c1, c0, 0 @ write control reg mrc p15, 0, r3, c0, c0, 0 @ read id reg mov r3, r3 mov r3, r13 mov pc, r3 -__enable_mmu_end: +__turn_mmu_on_end: ENDPROC(__turn_mmu_on) + .popsection #ifdef CONFIG_SMP_ON_UP --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/ptrace.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/ptrace.c @@ -699,10 +699,13 @@ { int ret; struct thread_info *thread = task_thread_info(target); - struct vfp_hard_struct new_vfp = thread->vfpstate.hard; + struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); + vfp_sync_hwstate(thread); + new_vfp = thread->vfpstate.hard; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, @@ -723,9 +726,8 @@ if (ret) return ret; - vfp_sync_hwstate(thread); - thread->vfpstate.hard = new_vfp; vfp_flush_hwstate(thread); + thread->vfpstate.hard = new_vfp; return 0; } --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/signal.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/signal.c @@ -227,6 +227,8 @@ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; + vfp_flush_hwstate(thread); + /* * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. @@ -251,9 +253,6 @@ __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - if (!err) - vfp_flush_hwstate(thread); - return err ? 
-EFAULT : 0; } --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/sleep.S +++ linux-lowlatency-3.2.0/arch/arm/kernel/sleep.S @@ -54,6 +54,7 @@ * r0 = control register value */ .align 5 + .pushsection .idmap.text,"ax" ENTRY(cpu_resume_mmu) ldr r3, =cpu_resume_after_mmu mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc @@ -62,6 +63,7 @@ mov r0, r0 mov pc, r3 @ jump to virtual address ENDPROC(cpu_resume_mmu) + .popsection cpu_resume_after_mmu: bl cpu_init @ restore the und/abt/irq banked regs mov r0, #0 @ return zero on success --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/setup.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/setup.c @@ -908,8 +908,8 @@ arm_dma_zone_size = mdesc->dma_zone_size; } #endif - if (mdesc->soft_reboot) - reboot_setup("s"); + if (mdesc->restart_mode) + reboot_setup(&mdesc->restart_mode); init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; @@ -928,6 +928,9 @@ paging_init(mdesc); request_standard_resources(mdesc); + if (mdesc->restart) + arm_pm_restart = mdesc->restart; + unflatten_device_tree(); #ifdef CONFIG_SMP --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/vmlinux.lds.S +++ linux-lowlatency-3.2.0/arch/arm/kernel/vmlinux.lds.S @@ -13,6 +13,12 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define IDMAP_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__idmap_text_start) = .; \ + *(.idmap.text) \ + VMLINUX_SYMBOL(__idmap_text_end) = .; + #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) #define ARM_CPU_KEEP(x) x @@ -92,6 +98,7 @@ SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IDMAP_TEXT #ifdef CONFIG_MMU *(.fixup) #endif --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/machine_kexec.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/machine_kexec.c @@ -12,12 +12,11 @@ #include #include #include +#include extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; -extern void setup_mm_for_reboot(char mode); - extern unsigned long kexec_start_address; extern unsigned long kexec_indirection_page; extern unsigned long kexec_mach_type; @@ -111,14 +110,6 @@ if (kexec_reinit) kexec_reinit(); - local_irq_disable(); - local_fiq_disable(); - setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ - flush_cache_all(); - outer_flush_all(); - outer_disable(); - cpu_proc_fin(); - outer_inv_all(); - flush_cache_all(); - cpu_reset(reboot_code_buffer_phys); + + soft_restart(reboot_code_buffer_phys); } --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/suspend.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/suspend.c @@ -1,13 +1,12 @@ #include +#include #include #include #include #include #include -static pgd_t *suspend_pgd; - extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); extern void cpu_resume_mmu(void); @@ -21,7 +20,7 @@ *save_ptr = virt_to_phys(ptr); /* This must correspond to the LDM in cpu_resume() assembly */ - *ptr++ = virt_to_phys(suspend_pgd); + *ptr++ = virt_to_phys(idmap_pgd); *ptr++ = sp; *ptr++ = virt_to_phys(cpu_do_resume); @@ -42,7 +41,7 @@ struct mm_struct *mm = current->active_mm; int ret; - if (!suspend_pgd) + if (!idmap_pgd) return -EINVAL; /* @@ -59,14 +58,3 @@ return ret; } - -static int __init cpu_suspend_init(void) -{ - suspend_pgd = pgd_alloc(&init_mm); - if (suspend_pgd) { - unsigned long addr = virt_to_phys(cpu_resume_mmu); - identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE); - } - return suspend_pgd ? 
0 : -ENOMEM; -} -core_initcall(cpu_suspend_init); --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/process.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/process.c @@ -57,7 +57,7 @@ "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; -extern void setup_mm_for_reboot(char mode); +extern void setup_mm_for_reboot(void); static volatile int hlt_counter; @@ -92,18 +92,24 @@ __setup("nohlt", nohlt_setup); __setup("hlt", hlt_setup); -void arm_machine_restart(char mode, const char *cmd) +extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); +typedef void (*phys_reset_t)(unsigned long); + +/* + * A temporary stack to use for CPU reset. This is static so that we + * don't clobber it with the identity mapping. When running with this + * stack, any references to the current task *will not work* so you + * should really do as little as possible before jumping to your reset + * code. + */ +static u64 soft_restart_stack[16]; + +static void __soft_restart(void *addr) { - /* Disable interrupts first */ - local_irq_disable(); - local_fiq_disable(); + phys_reset_t phys_reset; - /* - * Tell the mm system that we are going to reboot - - * we may need it to insert some 1:1 mappings so that - * soft boot works. - */ - setup_mm_for_reboot(mode); + /* Take out a flat memory mapping. */ + setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); @@ -114,18 +120,41 @@ /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); - /* - * Now call the architecture specific reboot code. - */ - arch_reset(mode, cmd); + /* Switch to the identity mapping. */ + phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); + phys_reset((unsigned long)addr); - /* - * Whoops - the architecture was unable to reboot. - * Tell the user! - */ - mdelay(1000); - printk("Reboot failed -- System halted\n"); - while (1); + /* Should never get here. */ + BUG(); +} + +void soft_restart(unsigned long addr) +{ + u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); + + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); + + /* Disable the L2 if we're the last man standing. */ + if (num_online_cpus() == 1) + outer_disable(); + + /* Change to the new stack and continue with the reset. */ + call_with_stack(__soft_restart, (void *)addr, (void *)stack); + + /* Should never get here. */ + BUG(); +} + +void arm_machine_restart(char mode, const char *cmd) +{ + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); + + /* Call the architecture specific reboot code. */ + arch_reset(mode, cmd); } /* @@ -253,7 +282,15 @@ void machine_restart(char *cmd) { machine_shutdown(); + arm_pm_restart(reboot_mode, cmd); + + /* Give a grace period for failure to restart of 1s */ + mdelay(1000); + + /* Whoops - the platform was unable to reboot. Tell the user! */ + printk("Reboot failed -- System halted\n"); + while (1); } void __show_regs(struct pt_regs *regs) --- linux-lowlatency-3.2.0.orig/arch/arm/kernel/smp.c +++ linux-lowlatency-3.2.0/arch/arm/kernel/smp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -61,7 +62,6 @@ { struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); struct task_struct *idle = ci->idle; - pgd_t *pgd; int ret; /* @@ -84,29 +84,11 @@ } /* - * Allocate initial page tables to allow the new CPU to - * enable the MMU safely. This essentially means a set - * of our "standard" page tables, with the addition of - * a 1:1 mapping for the physical address of the kernel. 
- */ - pgd = pgd_alloc(&init_mm); - if (!pgd) - return -ENOMEM; - - if (PHYS_OFFSET != PAGE_OFFSET) { -#ifndef CONFIG_HOTPLUG_CPU - identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); -#endif - identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); - identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); - } - - /* * We need to tell the secondary core where to find * its stack and the page tables. */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; - secondary_data.pgdir = virt_to_phys(pgd); + secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); @@ -142,16 +124,6 @@ secondary_data.stack = NULL; secondary_data.pgdir = 0; - if (PHYS_OFFSET != PAGE_OFFSET) { -#ifndef CONFIG_HOTPLUG_CPU - identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); -#endif - identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); - identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); - } - - pgd_free(&init_mm, pgd); - return ret; } @@ -550,6 +522,10 @@ local_fiq_disable(); local_irq_disable(); +#ifdef CONFIG_HOTPLUG_CPU + platform_cpu_kill(cpu); +#endif + while (1) cpu_relax(); } --- linux-lowlatency-3.2.0.orig/arch/arm/mach-ebsa110/core.c +++ linux-lowlatency-3.2.0/arch/arm/mach-ebsa110/core.c @@ -283,7 +283,7 @@ .atag_offset = 0x400, .reserve_lp0 = 1, .reserve_lp2 = 1, - .soft_reboot = 1, + .restart_mode = 's', .map_io = ebsa110_map_io, .init_irq = ebsa110_init_irq, .timer = &ebsa110_timer, --- linux-lowlatency-3.2.0.orig/arch/arm/mach-ebsa110/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-ebsa110/include/mach/system.h @@ -34,6 +34,6 @@ asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc"); } -#define arch_reset(mode, cmd) cpu_reset(0x80000000) +#define arch_reset(mode, cmd) soft_restart(0x80000000) #endif --- linux-lowlatency-3.2.0.orig/arch/arm/mach-iop33x/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-iop33x/include/mach/system.h @@ -19,5 +19,5 @@ *IOP3XX_PCSR = 0x30; /* Jump into ROM at address 0 */ - cpu_reset(0); + soft_restart(0); } --- linux-lowlatency-3.2.0.orig/arch/arm/lib/call_with_stack.S +++ linux-lowlatency-3.2.0/arch/arm/lib/call_with_stack.S @@ -0,0 +1,44 @@ +/* + * arch/arm/lib/call_with_stack.S + * + * Copyright (C) 2011 ARM Ltd. + * Written by Will Deacon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +/* + * void call_with_stack(void (*fn)(void *), void *arg, void *sp) + * + * Change the stack to that pointed at by sp, then invoke fn(arg) with + * the new stack. + */ +ENTRY(call_with_stack) + str sp, [r2, #-4]! + str lr, [r2, #-4]! 
+ + mov sp, r2 + mov r2, r0 + mov r0, r1 + + adr lr, BSYM(1f) + mov pc, r2 + +1: ldr lr, [sp] + ldr sp, [sp, #4] + mov pc, lr +ENDPROC(call_with_stack) --- linux-lowlatency-3.2.0.orig/arch/arm/lib/Makefile +++ linux-lowlatency-3.2.0/arch/arm/lib/Makefile @@ -13,7 +13,8 @@ testchangebit.o testclearbit.o testsetbit.o \ ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ ucmpdi2.o lib1funcs.o div64.o \ - io-readsb.o io-writesb.o io-readsl.o io-writesl.o + io-readsb.o io-writesb.o io-readsl.o io-writesl.o \ + call_with_stack.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o --- linux-lowlatency-3.2.0.orig/arch/arm/mach-ixp4xx/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-ixp4xx/include/mach/system.h @@ -26,7 +26,7 @@ { if ( 1 && mode == 's') { /* Jump into ROM at address 0 */ - cpu_reset(0); + soft_restart(0); } else { /* Use on-chip reset capability */ --- linux-lowlatency-3.2.0.orig/arch/arm/mach-rpc/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-rpc/include/mach/system.h @@ -23,5 +23,5 @@ /* * Jump into the ROM */ - cpu_reset(0); + soft_restart(0); } --- linux-lowlatency-3.2.0.orig/arch/arm/mach-at91/at91sam9263_devices.c +++ linux-lowlatency-3.2.0/arch/arm/mach-at91/at91sam9263_devices.c @@ -92,7 +92,7 @@ * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_AT91 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { --- linux-lowlatency-3.2.0.orig/arch/arm/mach-at91/at91rm9200_devices.c +++ linux-lowlatency-3.2.0/arch/arm/mach-at91/at91rm9200_devices.c @@ -83,7 +83,7 @@ * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_AT91 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { --- linux-lowlatency-3.2.0.orig/arch/arm/mach-at91/at91sam9261_devices.c +++ linux-lowlatency-3.2.0/arch/arm/mach-at91/at91sam9261_devices.c @@ -87,7 +87,7 @@ * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_AT91 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { --- linux-lowlatency-3.2.0.orig/arch/arm/mach-at91/setup.c +++ linux-lowlatency-3.2.0/arch/arm/mach-at91/setup.c @@ -27,9 +27,12 @@ void __init at91rm9200_set_type(int type) { if (type == ARCH_REVISON_9200_PQFP) - at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; - else at91_soc_initdata.subtype = AT91_SOC_RM9200_PQFP; + else + at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; + + pr_info("AT91: filled in soc subtype: %s\n", + at91_get_soc_subtype(&at91_soc_initdata)); } void __init at91_init_irq_default(void) --- linux-lowlatency-3.2.0.orig/arch/arm/mach-at91/at91sam9260_devices.c +++ linux-lowlatency-3.2.0/arch/arm/mach-at91/at91sam9260_devices.c @@ -84,7 +84,7 @@ * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_AT91 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { --- linux-lowlatency-3.2.0.orig/arch/arm/mach-sa1100/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-sa1100/include/mach/system.h @@ -14,7 +14,7 @@ { if (mode == 's') { /* Jump into ROM at address 0 */ - cpu_reset(0); + 
soft_restart(0); } else { /* Use on-chip reset capability */ RSRR = RSRR_SWR; --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/div64.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/div64.h @@ -73,6 +73,7 @@ #define do_div(n, base) \ ({ \ unsigned int __r, __b = (base); \ + asm("" : "+r" (__b)); \ if (!__builtin_constant_p(__b) || __b == 0 || \ (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \ /* non-constant divisor (or zero): slow path */ \ --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/idmap.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/idmap.h @@ -0,0 +1,14 @@ +#ifndef __ASM_IDMAP_H +#define __ASM_IDMAP_H + +#include +#include + +/* Tag a function as requiring to be executed via an identity mapping. */ +#define __idmap __section(.idmap.text) noinline notrace + +extern pgd_t *idmap_pgd; + +void setup_mm_for_reboot(void); + +#endif /* __ASM_IDMAP_H */ --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/pgtable.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/pgtable.h @@ -336,6 +336,7 @@ * We provide our own arch_get_unmapped_area to cope with VIPT caches. */ #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN /* * remap a physical page `pfn' of size `size' with page protection `prot' @@ -346,9 +347,6 @@ #define pgtable_cache_init() do { } while (0) -void identity_mapping_add(pgd_t *, unsigned long, unsigned long); -void identity_mapping_del(pgd_t *, unsigned long, unsigned long); - #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/system.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/system.h @@ -101,6 +101,7 @@ extern void cpu_init(void); void arm_machine_restart(char mode, const char *cmd); +void soft_restart(unsigned long); extern void (*arm_pm_restart)(char str, const char *cmd); #define UDBG_UNDEFINED (1 << 0) --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/processor.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/processor.h @@ -123,6 +123,8 @@ #endif +#define HAVE_ARCH_PICK_MMAP_LAYOUT + #endif #endif /* __ASM_ARM_PROCESSOR_H */ --- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/assembler.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/assembler.h @@ -137,6 +137,11 @@ disable_irq .endm + .macro save_and_disable_irqs_notrace, oldcpsr + mrs \oldcpsr, cpsr + disable_irq_notrace + .endm + /* * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags. 
--- linux-lowlatency-3.2.0.orig/arch/arm/include/asm/mach/arch.h +++ linux-lowlatency-3.2.0/arch/arm/include/asm/mach/arch.h @@ -31,10 +31,10 @@ unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ - unsigned int reserve_lp0 :1; /* never has lp0 */ - unsigned int reserve_lp1 :1; /* never has lp1 */ - unsigned int reserve_lp2 :1; /* never has lp2 */ - unsigned int soft_reboot :1; /* soft reboot */ + unsigned char reserve_lp0 :1; /* never has lp0 */ + unsigned char reserve_lp1 :1; /* never has lp1 */ + unsigned char reserve_lp2 :1; /* never has lp2 */ + char restart_mode; /* default restart mode */ void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ @@ -46,6 +46,7 @@ #ifdef CONFIG_MULTI_IRQ_HANDLER void (*handle_irq)(struct pt_regs *); #endif + void (*restart)(char, const char *); }; /* --- linux-lowlatency-3.2.0.orig/arch/arm/mach-footbridge/cats-hw.c +++ linux-lowlatency-3.2.0/arch/arm/mach-footbridge/cats-hw.c @@ -86,7 +86,7 @@ MACHINE_START(CATS, "Chalice-CATS") /* Maintainer: Philip Blundell */ .atag_offset = 0x100, - .soft_reboot = 1, + .restart_mode = 's', .fixup = fixup_cats, .map_io = footbridge_map_io, .init_irq = footbridge_init_irq, --- linux-lowlatency-3.2.0.orig/arch/arm/mach-footbridge/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-footbridge/include/mach/system.h @@ -24,7 +24,7 @@ /* * Jump into the ROM */ - cpu_reset(0x41000000); + soft_restart(0x41000000); } else { if (machine_is_netwinder()) { /* open up the SuperIO chip --- linux-lowlatency-3.2.0.orig/arch/arm/mach-shmobile/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-shmobile/include/mach/system.h @@ -8,7 +8,7 @@ static inline void arch_reset(char mode, const char *cmd) { - cpu_reset(0); + soft_restart(0); } #endif --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-v7.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-v7.S @@ -63,6 +63,7 @@ * caches disabled. */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_v7_reset) mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x1 @ ...............m @@ -71,6 +72,7 @@ isb mov pc, r0 ENDPROC(cpu_v7_reset) + .popsection /* * cpu_v7_do_idle() @@ -271,10 +273,6 @@ * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. Return in r0 the new CP15 C1 control register setting. * - * We automatically detect if we have a Harvard cache, and use the - * Harvard cache control instructions insead of the unified cache - * control instructions. - * * This should be able to cover all ARMv7 cores. * * It is assumed that: @@ -373,9 +371,7 @@ #endif 3: mov r10, #0 -#ifdef HARVARD_CACHE mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate -#endif dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-fa526.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-fa526.S @@ -57,6 +57,7 @@ * loc: location to jump to for soft reset */ .align 4 + .pushsection .idmap.text, "ax" ENTRY(cpu_fa526_reset) /* TODO: Use CP8 if possible... */ mov ip, #0 @@ -73,6 +74,8 @@ nop nop mov pc, r0 +ENDPROC(cpu_fa526_reset) + .popsection /* * cpu_fa526_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-sa110.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-sa110.S @@ -62,6 +62,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_sa110_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -74,6 +75,8 @@ bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_sa110_reset) + .popsection /* * cpu_sa110_do_idle(type) --- linux-lowlatency-3.2.0.orig/arch/arm/mm/nommu.c +++ linux-lowlatency-3.2.0/arch/arm/mm/nommu.c @@ -43,7 +43,7 @@ /* * We don't need to do anything here for nommu machines. */ -void setup_mm_for_reboot(char mode) +void setup_mm_for_reboot(void) { } --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-feroceon.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-feroceon.S @@ -98,6 +98,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_feroceon_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -110,6 +111,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_feroceon_reset) + .popsection /* * cpu_feroceon_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm1026.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm1026.S @@ -84,6 +84,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1026_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -96,6 +97,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1026_reset) + .popsection /* * cpu_arm1026_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-mohawk.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-mohawk.S @@ -69,6 +69,7 @@ * (same as arm926) */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_mohawk_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -79,6 +80,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_mohawk_reset) + .popsection /* * cpu_mohawk_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-xsc3.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-xsc3.S @@ -105,6 +105,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_xsc3_reset) mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r1 @ reset CPSR @@ -119,6 +120,8 @@ @ already containing those two last instructions to survive. mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs mov pc, r0 +ENDPROC(cpu_xsc3_reset) + .popsection /* * cpu_xsc3_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-xscale.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-xscale.S @@ -142,6 +142,7 @@ * Beware PXA270 erratum E7. */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_xscale_reset) mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE msr cpsr_c, r1 @ reset CPSR @@ -160,6 +161,8 @@ @ already containing those two last instructions to survive. mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mov pc, r0 +ENDPROC(cpu_xscale_reset) + .popsection /* * cpu_xscale_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm9tdmi.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm9tdmi.S @@ -45,8 +45,11 @@ * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. 
*/ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm9tdmi_reset) mov pc, r0 +ENDPROC(cpu_arm9tdmi_reset) + .popsection __CPUINIT --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm1020.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm1020.S @@ -95,6 +95,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -107,6 +108,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1020_reset) + .popsection /* * cpu_arm1020_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm740.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm740.S @@ -49,6 +49,7 @@ * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm740_reset) mov ip, #0 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache @@ -56,6 +57,8 @@ bic ip, ip, #0x0000000c @ ............wc.. mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm740_reset) + .popsection __CPUINIT --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm1022.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm1022.S @@ -84,6 +84,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1022_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -96,6 +97,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1022_reset) + .popsection /* * cpu_arm1022_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/cache-v7.S +++ linux-lowlatency-3.2.0/arch/arm/mm/cache-v7.S @@ -54,9 +54,15 @@ and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache +#ifdef CONFIG_PREEMPT + save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic +#endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr +#ifdef CONFIG_PREEMPT + restore_irqs_notrace r9 +#endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) ldr r4, =0x3ff --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm925.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm925.S @@ -108,6 +108,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm925_reset) /* Send software reset to MPU and DSP */ mov ip, #0xff000000 @@ -115,6 +116,8 @@ orr ip, ip, #0x0000ce00 mov r4, #1 strh r4, [ip, #0x10] +ENDPROC(cpu_arm925_reset) + .popsection mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm922.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm922.S @@ -87,6 +87,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm922_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -99,6 +100,8 @@ bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm922_reset) + .popsection /* * cpu_arm922_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm920.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm920.S @@ -85,6 +85,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm920_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -97,6 +98,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm920_reset) + .popsection /* * cpu_arm920_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-v6.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-v6.S @@ -55,6 +55,7 @@ * - loc - location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_v6_reset) mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x1 @ ...............m @@ -62,6 +63,8 @@ mov r1, #0 mcr p15, 0, r1, c7, c5, 4 @ ISB mov pc, r0 +ENDPROC(cpu_v6_reset) + .popsection /* * cpu_v6_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/idmap.c +++ linux-lowlatency-3.2.0/arch/arm/mm/idmap.c @@ -1,8 +1,12 @@ #include #include +#include #include #include +#include + +pgd_t *idmap_pgd; static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, unsigned long prot) @@ -28,7 +32,7 @@ } while (pud++, addr = next, addr != end); } -void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) +static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) { unsigned long prot, next; @@ -43,48 +47,41 @@ } while (pgd++, addr = next, addr != end); } -#ifdef CONFIG_SMP -static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) -{ - pmd_t *pmd = pmd_offset(pud, addr); - pmd_clear(pmd); -} +extern char __idmap_text_start[], __idmap_text_end[]; -static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) +static int __init init_static_idmap(void) { - pud_t *pud = pud_offset(pgd, addr); - unsigned long next; + phys_addr_t idmap_start, idmap_end; - do { - next = pud_addr_end(addr, end); - idmap_del_pmd(pud, addr, next); - } while (pud++, addr = next, addr != end); -} + idmap_pgd = pgd_alloc(&init_mm); + if (!idmap_pgd) + return -ENOMEM; -void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) -{ - unsigned long next; + /* Add an identity mapping for the physical address of the section. */ + idmap_start = virt_to_phys((void *)__idmap_text_start); + idmap_end = virt_to_phys((void *)__idmap_text_end); - pgd += pgd_index(addr); - do { - next = pgd_addr_end(addr, end); - idmap_del_pud(pgd, addr, next); - } while (pgd++, addr = next, addr != end); + pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", + (long long)idmap_start, (long long)idmap_end); + identity_mapping_add(idmap_pgd, idmap_start, idmap_end); + + return 0; } -#endif +early_initcall(init_static_idmap); /* - * In order to soft-boot, we need to insert a 1:1 mapping in place of - * the user-mode pages. This will then ensure that we have predictable - * results when turning the mmu off + * In order to soft-boot, we need to switch to a 1:1 mapping for the + * cpu_reset functions. This will then ensure that we have predictable + * results when turning off the mmu. */ -void setup_mm_for_reboot(char mode) +void setup_mm_for_reboot(void) { - /* - * We need to access to user-mode page tables here. For kernel threads - * we don't have any user-mode mappings so we use the context that we - * "borrowed". 
- */ - identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE); + /* Clean and invalidate L1. */ + flush_cache_all(); + + /* Switch to the identity mapping. */ + cpu_switch_mm(idmap_pgd, &init_mm); + + /* Flush the TLB. */ local_flush_tlb_all(); } --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm6_7.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm6_7.S @@ -225,6 +225,7 @@ * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm6_reset) ENTRY(cpu_arm7_reset) mov r1, #0 @@ -235,6 +236,9 @@ mov r1, #0x30 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc mov pc, r0 +ENDPROC(cpu_arm6_reset) +ENDPROC(cpu_arm7_reset) + .popsection __CPUINIT --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm720.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm720.S @@ -101,6 +101,7 @@ * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm720_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache @@ -112,6 +113,8 @@ bic ip, ip, #0x2100 @ ..v....s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm720_reset) + .popsection __CPUINIT --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm940.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm940.S @@ -48,6 +48,7 @@ * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm940_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache @@ -58,6 +59,8 @@ bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm940_reset) + .popsection /* * cpu_arm940_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm7tdmi.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm7tdmi.S @@ -45,8 +45,11 @@ * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm7tdmi_reset) mov pc, r0 +ENDPROC(cpu_arm7tdmi_reset) + .popsection __CPUINIT --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm1020e.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm1020e.S @@ -95,6 +95,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020e_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -107,6 +108,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1020e_reset) + .popsection /* * cpu_arm1020e_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-sa1100.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-sa1100.S @@ -70,6 +70,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_sa1100_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -82,6 +83,8 @@ bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_sa1100_reset) + .popsection /* * cpu_sa1100_do_idle(type) --- linux-lowlatency-3.2.0.orig/arch/arm/mm/mmap.c +++ linux-lowlatency-3.2.0/arch/arm/mm/mmap.c @@ -11,10 +11,49 @@ #include #include +static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, + unsigned long pgoff) +{ + unsigned long base = addr & ~(SHMLBA-1); + unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1); + + if (base + off <= addr) + return base + off; + + return base - off; +} + #define COLOUR_ALIGN(addr,pgoff) \ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ (((pgoff)<personality & ADDR_COMPAT_LAYOUT) + return 1; + + if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + return 1; + + return sysctl_legacy_va_layout; +} + +static unsigned long mmap_base(unsigned long rnd) +{ + unsigned long gap = rlimit(RLIMIT_STACK); + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + return PAGE_ALIGN(TASK_SIZE - gap - rnd); +} + /* * We need to ensure that shared mappings are correctly aligned to * avoid aliasing issues with VIPT caches. We need to ensure that @@ -68,13 +107,9 @@ if (len > mm->cached_hole_size) { start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; } - /* 8 bits of randomness in 20 address space bits */ - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) - addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: if (do_align) @@ -111,6 +146,134 @@ } } +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); + + /* + * We only need to do colour alignment if either the I or D + * caches alias. 
+ */ + if (aliasing) + do_align = filp || (flags & MAP_SHARED); + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (aliasing && flags & MAP_SHARED && + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + return -EINVAL; + return addr; + } + + /* requesting a specific address */ + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + if (do_align) { + unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff); + addr = base + len; + } + + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + + if (mm->mmap_base < len) + goto bottomup; + + addr = mm->mmap_base - len; + if (do_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (!vma || addr+len <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start - len; + if (do_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + } while (len < vma->vm_start); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; +} + +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = 0UL; + + /* 8 bits of randomness in 20 address space bits */ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) + random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT; + + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +} /* * You really shouldn't be using read() or write() on /dev/mem. 
This --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm946.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm946.S @@ -55,6 +55,7 @@ * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm946_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache @@ -65,6 +66,8 @@ bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm946_reset) + .popsection /* * cpu_arm946_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mm/proc-arm926.S +++ linux-lowlatency-3.2.0/arch/arm/mm/proc-arm926.S @@ -77,6 +77,7 @@ * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm926_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -89,6 +90,8 @@ bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm926_reset) + .popsection /* * cpu_arm926_do_idle() --- linux-lowlatency-3.2.0.orig/arch/arm/mach-s3c64xx/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-s3c64xx/include/mach/system.h @@ -24,7 +24,7 @@ arch_wdt_reset(); /* if all else fails, or mode was for soft, jump to 0 */ - cpu_reset(0); + soft_restart(0); } #endif /* __ASM_ARCH_IRQ_H */ --- linux-lowlatency-3.2.0.orig/arch/arm/common/pl330.c +++ linux-lowlatency-3.2.0/arch/arm/common/pl330.c @@ -1496,12 +1496,13 @@ struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; unsigned long flags; - int ret = 0, active = thrd->req_running; + int ret = 0, active; if (!thrd || thrd->free || thrd->dmac->state == DYING) return -EINVAL; pl330 = thrd->dmac; + active = thrd->req_running; spin_lock_irqsave(&pl330->lock, flags); --- linux-lowlatency-3.2.0.orig/arch/arm/mach-ux500/board-mop500-sdi.c +++ linux-lowlatency-3.2.0/arch/arm/mach-ux500/board-mop500-sdi.c @@ -233,6 +233,8 @@ { u32 periphid = 0x10480180; + /* On Snowball MMC_CAP_SD_HIGHSPEED isn't supported on sdi0 */ + mop500_sdi0_data.capabilities &= ~MMC_CAP_SD_HIGHSPEED; mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED; /* On-board eMMC */ --- linux-lowlatency-3.2.0.orig/arch/arm/mach-ux500/Kconfig +++ linux-lowlatency-3.2.0/arch/arm/mach-ux500/Kconfig @@ -7,6 +7,7 @@ select HAS_MTU select ARM_ERRATA_753970 select ARM_ERRATA_754322 + select ARM_ERRATA_764369 menu "Ux500 SoC" --- linux-lowlatency-3.2.0.orig/arch/arm/mach-w90x900/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-w90x900/include/mach/system.h @@ -33,7 +33,7 @@ { if (mode == 's') { /* Jump into ROM at address 0 */ - cpu_reset(0); + soft_restart(0); } else { __raw_writel(WTE | WTRE | WTCLK, WTCR); } --- linux-lowlatency-3.2.0.orig/arch/arm/mach-mmp/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-mmp/include/mach/system.h @@ -19,8 +19,8 @@ static inline void arch_reset(char mode, const char *cmd) { if (cpu_is_pxa168()) - cpu_reset(0xffff0000); + soft_restart(0xffff0000); else - cpu_reset(0); + soft_restart(0); } #endif /* __ASM_MACH_SYSTEM_H */ --- linux-lowlatency-3.2.0.orig/arch/arm/mach-iop32x/include/mach/system.h +++ linux-lowlatency-3.2.0/arch/arm/mach-iop32x/include/mach/system.h @@ -30,5 +30,5 @@ *IOP3XX_PCSR = 0x30; /* Jump into ROM at address 0 */ - cpu_reset(0); + soft_restart(0); } --- linux-lowlatency-3.2.0.orig/arch/arm/mach-omap2/display.c +++ linux-lowlatency-3.2.0/arch/arm/mach-omap2/display.c @@ -22,12 +22,13 @@ #include #include #include +#include #include