                PAGE_ALIGNED_DATA(PAGE_SIZE)
        }
 
-       .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+       .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
        }
 
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
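
As a sketch of what that comment describes (the struct and array names below are invented for illustration, not taken from the kernel), CPU-local data can be kept free of cacheline ping-pong by aligning each element to a full cacheline and letting __cacheline_aligned place the object in .data..cacheline_aligned:

#include <linux/cache.h>	/* __cacheline_aligned, ____cacheline_aligned */
#include <linux/threads.h>	/* NR_CPUS */

/*
 * Hypothetical example: one slot per CPU.  ____cacheline_aligned aligns
 * (and thereby pads) the element type to SMP_CACHE_BYTES so neighbouring
 * slots never share a cacheline; __cacheline_aligned on the array aligns
 * it and emits it into the .data..cacheline_aligned input section
 * collected by the linker-script rule shown above.
 */
struct cpu_hot_data {
	unsigned long	hits;
} ____cacheline_aligned;

static struct cpu_hot_data cpu_hot[NR_CPUS] __cacheline_aligned;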
 
 
 #define CACHELINE_ALIGNED_DATA(align)                                  \
        . = ALIGN(align);                                               \
-       *(.data.cacheline_aligned)
+       *(.data..cacheline_aligned)
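
For reference, once the preprocessor substitutes CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) into the output-section definition shown at the top of this excerpt, the generated linker script reads roughly as follows (a sketch only; LOAD_OFFSET stays whatever the architecture defines):

.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
	. = ALIGN(L1_CACHE_BYTES);
	*(.data..cacheline_aligned)
}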
 
 #define INIT_TASK_DATA(align)                                          \
        . = ALIGN(align);                                               \
-       *(.data.init_task)
+       *(.data..init_task)
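
Objects reach that section from the C side through a section attribute; the kernel provides __init_task_data in include/linux/init_task.h for this, expanding to __attribute__((__section__(".data..init_task"))). A minimal sketch modeled on the architectures' init_task.c declarations, shown here for illustration only:

#include <linux/init_task.h>	/* __init_task_data */
#include <linux/sched.h>	/* union thread_union, init_task */

/*
 * The initial task's stack/thread_info are emitted into
 * .data..init_task, which the INIT_TASK_DATA() rule above then places
 * at the requested alignment.
 */
union thread_union init_thread_union __init_task_data = {
	INIT_THREAD_INFO(init_task)
};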
 
 #ifndef __cacheline_aligned
 #define __cacheline_aligned                                    \
   __attribute__((__aligned__(SMP_CACHE_BYTES),                 \
-                __section__(".data.cacheline_aligned")))
+                __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
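
As a small usage sketch (the variable name is invented): tagging an object __cacheline_aligned both raises its alignment to SMP_CACHE_BYTES and moves it out of plain .data into .data..cacheline_aligned, where the CACHELINE_ALIGNED_DATA() rule above gathers it.

#include <linux/cache.h>	/* __cacheline_aligned, SMP_CACHE_BYTES */

/* Hypothetical hot, frequently-written variable: give it its own
 * cacheline and the dedicated section. */
static unsigned long hot_counter __cacheline_aligned;

/*
 * After preprocessing this is equivalent to:
 *
 *	static unsigned long hot_counter
 *		__attribute__((__aligned__(SMP_CACHE_BYTES),
 *			       __section__(".data..cacheline_aligned")));
 */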
 
 #ifndef __cacheline_aligned_in_smp