Where the mips32 & mips64 implementations of start.S differ in terms of
access sizes & offsets, use the appropriate macros from asm.h to
abstract those differences away. This is in preparation for sharing a
single copy of start.S between mips32 & mips64.
The exception to this is loads of immediates to be written to the cop0
Config register, which is a 32-bit register even on mips64, so constants
written to it can be loaded as 32-bit values.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
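
For reference, the asm.h abstractions relied upon below expand roughly as
follows. This is a minimal sketch modeled on the usual MIPS <asm/asm.h>
conventions, not the verbatim header: the real definitions key off the ABI
rather than this illustrative CONFIG_64BIT test, and the MTC0 definition is
assumed to come from <asm/mipsregs.h>.

#ifdef CONFIG_64BIT
# define PTR		.dword	/* emit one pointer-sized constant */
# define PTRSIZE	8	/* sizeof(void *) */
# define PTRLOG		3	/* log2(PTRSIZE), for .align */
# define PTR_L		ld	/* pointer-sized load */
# define PTR_S		sd	/* pointer-sized store */
# define PTR_LA		dla	/* pointer-sized address load */
# define PTR_LI		dli	/* pointer-sized immediate load */
# define PTR_ADD	dadd	/* pointer-sized arithmetic */
# define PTR_ADDI	daddi
# define PTR_ADDU	daddu
# define PTR_SUB	dsub
# define MTC0		dmtc0	/* assumed: 64-bit cop0 move */
#else
# define PTR		.word
# define PTRSIZE	4
# define PTRLOG		2
# define PTR_L		lw
# define PTR_S		sw
# define PTR_LA		la
# define PTR_LI		li
# define PTR_ADD	add
# define PTR_ADDI	addi
# define PTR_ADDU	addu
# define PTR_SUB	sub
# define MTC0		mtc0
#endif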
#include <asm-offsets.h>
#include <config.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
reset:
/* Clear watch registers */
- mtc0 zero, CP0_WATCHLO
- mtc0 zero, CP0_WATCHHI
+ MTC0 zero, CP0_WATCHLO
+ MTC0 zero, CP0_WATCHHI
/* WP(Watch Pending), SW0/1 should be cleared */
mtc0 zero, CP0_CAUSE
mtc0 t0, CP0_CONFIG
#endif
+ /*
+ * Initialize $gp. Force pointer-sized alignment of the bal instruction
+ * so the assembler cannot insert nops between bal and _gp; this is
+ * required to keep _gp and ra aligned to 8 bytes.
+ */
+ .align PTRLOG
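
For context, the sequence this alignment protects looks roughly like the
following (sketched from the typical MIPS U-Boot $gp setup; it is not part
of this hunk). bal places the address of the following word in ra, so the
PTR literal must sit at a fixed, pointer-aligned offset from the branch:

	bal	1f		# ra <- address of the PTR word below
	 nop			# branch delay slot
	PTR	_gp		# pointer-sized literal holding _gp
1:
	PTR_L	gp, 0(ra)	# gp <- _gp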
#ifndef CONFIG_SKIP_LOWLEVEL_INIT
/* Initialize any external memory */
+ PTR_LA t9, lowlevel_init
jalr t9
nop
/* Initialize caches... */
- la t9, mips_cache_reset
+ PTR_LA t9, mips_cache_reset
#endif
/* Set up temporary stack */
#endif
/* Set up temporary stack */
- li t0, -16
- li t1, CONFIG_SYS_INIT_SP_ADDR
+ PTR_LI t0, -16
+ PTR_LI t1, CONFIG_SYS_INIT_SP_ADDR
and sp, t1, t0 # force 16 byte alignment
- sub sp, sp, GD_SIZE # reserve space for gd
+ PTR_SUB sp, sp, GD_SIZE # reserve space for gd
and sp, sp, t0 # force 16 byte alignment
move k0, sp # save gd pointer
#ifdef CONFIG_SYS_MALLOC_F_LEN
- li t2, CONFIG_SYS_MALLOC_F_LEN
- sub sp, sp, t2 # reserve space for early malloc
+ PTR_LI t2, CONFIG_SYS_MALLOC_F_LEN
+ PTR_SUB sp, sp, t2 # reserve space for early malloc
and sp, sp, t0 # force 16 byte alignment
#endif
move fp, sp
1:
sw zero, 0(t0)
blt t0, t1, 1b
#ifdef CONFIG_SYS_MALLOC_F_LEN
- addu t0, k0, GD_MALLOC_BASE # gd->malloc_base offset
+ PTR_ADDU t0, k0, GD_MALLOC_BASE # gd->malloc_base offset
+ PTR_LA t9, board_init_f
move s0, a1 # save gd in s0
move s2, a2 # save destination address in s2
- li t0, CONFIG_SYS_MONITOR_BASE
- sub s1, s2, t0 # s1 <-- relocation offset
+ PTR_LI t0, CONFIG_SYS_MONITOR_BASE
+ PTR_SUB s1, s2, t0 # s1 <-- relocation offset
- la t3, in_ram
- lw t2, -12(t3) # t2 <-- __image_copy_end
+ PTR_LA t3, in_ram
+ PTR_L t2, -(3 * PTRSIZE)(t3) # t2 <-- __image_copy_end
+ PTR_ADD gp, s1 # adjust gp
1:
lw t3, 0(t0)
sw t3, 0(t1)
/* If caches were enabled, we would have to flush them here. */
- sub a1, t1, s2 # a1 <-- size
- la t9, flush_cache
+ PTR_SUB a1, t1, s2 # a1 <-- size
+ PTR_LA t9, flush_cache
jalr t9
move a0, s2 # a0 <-- destination address
/* Jump to where we've relocated ourselves */
- addi t0, s2, in_ram - _start
+ PTR_ADDI t0, s2, in_ram - _start
- .word __rel_dyn_end
- .word __rel_dyn_start
- .word __image_copy_end
- .word _GLOBAL_OFFSET_TABLE_
- .word num_got_entries
+ PTR __rel_dyn_end
+ PTR __rel_dyn_start
+ PTR __image_copy_end
+ PTR _GLOBAL_OFFSET_TABLE_
+ PTR num_got_entries
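
Emitting this table with PTR keeps the negative offsets taken relative to
in_ram consistent on both ABIs. From the order above, the entries line up
as follows (derived from this hunk):

	# in_ram - 1*PTRSIZE : num_got_entries
	# in_ram - 2*PTRSIZE : _GLOBAL_OFFSET_TABLE_
	# in_ram - 3*PTRSIZE : __image_copy_end
	# in_ram - 4*PTRSIZE : __rel_dyn_start
	# in_ram - 5*PTRSIZE : __rel_dyn_end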
* GOT[0] is reserved. GOT[1] is also reserved for the dynamic object
* generated by GNU ld. Skip these reserved entries from relocation.
*/
- lw t3, -4(t0) # t3 <-- num_got_entries
- lw t8, -8(t0) # t8 <-- _GLOBAL_OFFSET_TABLE_
- add t8, s1 # t8 now holds relocated _G_O_T_
- addi t8, t8, 8 # skipping first two entries
- li t2, 2
+ PTR_L t3, -(1 * PTRSIZE)(t0) # t3 <-- num_got_entries
+ PTR_L t8, -(2 * PTRSIZE)(t0) # t8 <-- _GLOBAL_OFFSET_TABLE_
+ PTR_ADD t8, s1 # t8 now holds relocated _G_O_T_
+ PTR_ADDI t8, t8, 2 * PTRSIZE # skipping first two entries
+ PTR_LI t2, 2
- add t1, s1
- sw t1, 0(t8)
+ PTR_ADD t1, s1
+ PTR_S t1, 0(t8)
/* Update dynamic relocations */
- lw t1, -16(t0) # t1 <-- __rel_dyn_start
- lw t2, -20(t0) # t2 <-- __rel_dyn_end
+ PTR_L t1, -(4 * PTRSIZE)(t0) # t1 <-- __rel_dyn_start
+ PTR_L t2, -(5 * PTRSIZE)(t0) # t2 <-- __rel_dyn_end
b 2f # skip first reserved entry
+ PTR_ADDI t1, 2 * PTRSIZE
1:
lw t8, -4(t1) # t8 <-- relocation info
bne t8, t3, 2f # skip non R_MIPS_REL32 entries
nop
- lw t3, -8(t1) # t3 <-- location to fix up in FLASH
+ PTR_L t3, -(2 * PTRSIZE)(t1) # t3 <-- location to fix up in FLASH
- lw t8, 0(t3) # t8 <-- original pointer
- add t8, s1 # t8 <-- adjusted pointer
+ PTR_L t8, 0(t3) # t8 <-- original pointer
+ PTR_ADD t8, s1 # t8 <-- adjusted pointer
- add t3, s1 # t3 <-- location to fix up in RAM
- sw t8, 0(t3)
+ PTR_ADD t3, s1 # t3 <-- location to fix up in RAM
+ PTR_S t8, 0(t3)
- addi t1, 8 # each rel.dyn entry is 8 bytes
+ PTR_ADDI t1, 2 * PTRSIZE # each rel.dyn entry is 2*PTRSIZE bytes
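
The 2 * PTRSIZE stride matches the layout of an ELF Rel record, both of
whose fields are pointer-sized on the respective ABI (a sketch of the
layout assumed here, not a definition from the patch):

	/*
	 * ElfNN_Rel:
	 *   r_offset   PTRSIZE bytes   location to patch
	 *   r_info     PTRSIZE bytes   symbol index + relocation type
	 */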
* GOT is now relocated. Thus __bss_start and __bss_end can be
* accessed directly via $gp.
*/
- la t1, __bss_start # t1 <-- __bss_start
- la t2, __bss_end # t2 <-- __bss_end
+ PTR_LA t1, __bss_start # t1 <-- __bss_start
+ PTR_LA t2, __bss_end # t2 <-- __bss_end
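
For context, the clearing loop that consumes these bounds is elided between
the hunks; in the unified version it presumably zeroes .bss one
pointer-sized word at a time, along these lines (a sketch, not the verbatim
code):

1:
	PTR_S	zero, 0(t1)		# zero one pointer-sized word
	blt	t1, t2, 1b		# loop until t1 reaches __bss_end
	 PTR_ADDI t1, PTRSIZE		# branch delay slot: advance pointer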
move a0, s0 # a0 <-- gd
move a1, s2
+ PTR_LA t9, board_init_r