]> git.hungrycats.org Git - linux/commitdiff
[SPARC64]: Always record actual PC when kernel profiling.
authorDavid S. Miller <davem@nuts.davemloft.net>
Sun, 8 Aug 2004 12:53:23 +0000 (05:53 -0700)
committerDavid S. Miller <davem@nuts.davemloft.net>
Sun, 8 Aug 2004 12:53:23 +0000 (05:53 -0700)
We used to play games reporting the caller's
PC in certain functions such as the rwlock
and atomic_t routines.  If anything, something
like this should be optional, not by default.

Signed-off-by: David S. Miller <davem@redhat.com>
arch/sparc64/kernel/time.c
arch/sparc64/lib/VISbzero.S
arch/sparc64/lib/VIScopy.S
arch/sparc64/lib/atomic.S
arch/sparc64/lib/bitops.S
arch/sparc64/lib/rwlock.S

index 11625cce4891f64d5710ea021b6219df91d86a14..f961a0d3b1290a412be5de381bb1508172e1c906 100644 (file)
@@ -443,8 +443,7 @@ static inline void timer_check_rtc(void)
 
 void sparc64_do_profile(struct pt_regs *regs)
 {
-       unsigned long pc = regs->tpc;
-       unsigned long o7 = regs->u_regs[UREG_RETPC];
+       unsigned long pc;
 
        profile_hook(regs);
 
@@ -454,32 +453,14 @@ void sparc64_do_profile(struct pt_regs *regs)
        if (!prof_buffer)
                return;
 
-       {
-               extern int rwlock_impl_begin, rwlock_impl_end;
-               extern int atomic_impl_begin, atomic_impl_end;
-               extern int __memcpy_begin, __memcpy_end;
-               extern int __bzero_begin, __bzero_end;
-               extern int __bitops_begin, __bitops_end;
-
-               if ((pc >= (unsigned long) &atomic_impl_begin &&
-                    pc < (unsigned long) &atomic_impl_end) ||
-                   (pc >= (unsigned long) &rwlock_impl_begin &&
-                    pc < (unsigned long) &rwlock_impl_end) ||
-                   (pc >= (unsigned long) &__memcpy_begin &&
-                    pc < (unsigned long) &__memcpy_end) ||
-                   (pc >= (unsigned long) &__bzero_begin &&
-                    pc < (unsigned long) &__bzero_end) ||
-                   (pc >= (unsigned long) &__bitops_begin &&
-                    pc < (unsigned long) &__bitops_end))
-                       pc = o7;
-
-               pc -= (unsigned long) _stext;
-               pc >>= prof_shift;
-
-               if(pc >= prof_len)
-                       pc = prof_len - 1;
-               atomic_inc((atomic_t *)&prof_buffer[pc]);
-       }
+       pc = regs->tpc;
+
+       pc -= (unsigned long) _stext;
+       pc >>= prof_shift;
+
+       if(pc >= prof_len)
+               pc = prof_len - 1;
+       atomic_inc((atomic_t *)&prof_buffer[pc]);
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
index c8713995c4767f9d3fd0fc354be3fc849da35877..06b697bab974bd4da4ccf2afa25ef3d3e7440d9e 100644 (file)
@@ -83,8 +83,6 @@
        .text
        .align          32
 #ifdef __KERNEL__
-       .globl          __bzero_begin
-__bzero_begin:
        .globl          __bzero, __bzero_noasi
 __bzero_noasi:
        rd              %asi, %g5
@@ -274,5 +272,3 @@ VISbzerofixup_zb:
        ba,pt           %xcc, VISbzerofixup_ret0
         sub            %o1, %g2, %o0
 #endif
-       .globl          __bzero_end
-__bzero_end:
index e47bde082392a041d63595aa0b3cf17cac6c5564..b3e0fa5dca0e45b3e0a5ee53364d456d7471b64f 100644 (file)
                .type                   bcopy,@function
 
 #ifdef __KERNEL__
-               .globl                  __memcpy_begin
-__memcpy_begin:
-
 memcpy_private:
 memcpy:                mov             ASI_P, asi_src                  ! IEU0  Group
                brnz,pt         %o2, __memcpy_entry             ! CTI
@@ -1055,9 +1052,6 @@ fpu_retl:
        FPU_RETL
 
 #ifdef __KERNEL__
-       .globl          __memcpy_end
-__memcpy_end:
-
                .section        .fixup
                .align          4
 VIScopyfixup_reto2:
index 13d68fd64d95ad745798706306226a0f59be3a59..a0d6d8ac3bd541da0e5681394daef0dfd7a0f76a 100644 (file)
@@ -9,10 +9,7 @@
        .text
        .align  64
 
-       .globl  atomic_impl_begin, atomic_impl_end
-
        .globl  __atomic_add
-atomic_impl_begin:
 __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
        lduw    [%o1], %g5
        add     %g5, %o0, %g7
@@ -56,4 +53,3 @@ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
        retl
         sub    %g7, %o0, %o0
 
-atomic_impl_end:
index fa8558237a7f2724e9c94604a0f210b9c6358e97..0c2aacc8398d6d06f4630ce3b60d458905773661 100644 (file)
@@ -8,9 +8,6 @@
 
        .text
        .align  64
-       .globl  __bitops_begin
-__bitops_begin:
-
        .globl  ___test_and_set_bit
 ___test_and_set_bit:   /* %o0=nr, %o1=addr */
        srlx    %o0, 6, %g1
@@ -105,6 +102,3 @@ ___test_and_clear_le_bit:   /* %o0=nr, %o1=addr */
         lduwa  [%o1] ASI_PL, %g7
 2:     retl
         membar #StoreLoad | #StoreStore
-
-       .globl  __bitops_end
-__bitops_end:
index ffbf75bc3bc7f7fcfe31c28a9bcd2b22c87d7a20..8d8ecece2ed9d099f1468d0c893d89596350c67a 100644 (file)
@@ -7,12 +7,9 @@
        .text
        .align  64
 
-       .globl  rwlock_impl_begin, rwlock_impl_end
-
        /* The non-contention read lock usage is 2 cache lines. */
 
        .globl  __read_lock, __read_unlock
-rwlock_impl_begin:
 __read_lock: /* %o0 = lock_ptr */
        ldsw            [%o0], %g5
        brlz,pn         %g5, __read_wait_for_writer
@@ -85,5 +82,4 @@ __write_trylock_succeed:
 __write_trylock_fail:
        retl
         mov            0, %o0
-rwlock_impl_end: