Got all of the code to actually compile again

I don't think it actually works, though.
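
For reference, here is the retry-loop pattern this cmpxchg is meant to
support, as a minimal C sketch (the function and variable names are
hypothetical, not part of this header; cmpxchg(ptr, o, n) returns the value
that was actually in *ptr, per the __sync_val_compare_and_swap semantics):

  #include <stdint.h>

  /* Atomically increments *counter.  The loop retries until no other
   * thread raced in between the read and the swap. */
  static inline void atomic_increment(volatile uint32_t *counter) {
    uint32_t old;
    do {
      old = *counter;
    } while (cmpxchg(counter, old, old + 1) != old);
  }
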
diff --git a/aos/atom_code/ipc_lib/cmpxchg.h b/aos/atom_code/ipc_lib/cmpxchg.h
index acb4a3c..715c57d 100644
--- a/aos/atom_code/ipc_lib/cmpxchg.h
+++ b/aos/atom_code/ipc_lib/cmpxchg.h
@@ -9,10 +9,10 @@
 
 #define cmpxchg(ptr, o, n) __sync_val_compare_and_swap(ptr, o, n)
 /*#define xchg(ptr, n) ({typeof(*ptr) r; \
-		do{ \
-			r = *ptr; \
-		}while(!__sync_bool_compare_and_swap(ptr, r, n)); \
-		r; \
+    do{ \
+      r = *ptr; \
+    }while(!__sync_bool_compare_and_swap(ptr, r, n)); \
+    r; \
 })*/
 
 #  define LOCK "lock;"
@@ -24,7 +24,7 @@
 
 /*static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 {
-	*ptr = val;
+  *ptr = val;
 }
 
 #define _set_64bit set_64bit*/
@@ -32,37 +32,37 @@
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ *    but generally the primitive is invalid, *ptr is output argument. --ANK
  */
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
-	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-					:"=q" (x)
-					:"m" (*__xg(ptr)), "0" (x)
-					:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-					:"=r" (x)
-					:"m" (*__xg(ptr)), "0" (x)
-					:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %k0,%1"
-					:"=r" (x)
-					:"m" (*__xg(ptr)), "0" (x)
-					:"memory");
-			break;
-		case 8:
-			__asm__ __volatile__("xchg %0,%1"
-					:"=r" (x)
-					:"m" (*__xg(ptr)), "0" (x)
-					:"memory");
-			break;
-	}
-	return x;
+  switch (size) {
+    case 1:
+      __asm__ __volatile__("xchgb %b0,%1"
+          :"=q" (x)
+          :"m" (*__xg(ptr)), "0" (x)
+          :"memory");
+      break;
+    case 2:
+      __asm__ __volatile__("xchgw %w0,%1"
+          :"=r" (x)
+          :"m" (*__xg(ptr)), "0" (x)
+          :"memory");
+      break;
+    case 4:
+      __asm__ __volatile__("xchgl %k0,%1"
+          :"=r" (x)
+          :"m" (*__xg(ptr)), "0" (x)
+          :"memory");
+      break;
+    case 8:
+      __asm__ __volatile__("xchg %0,%1"
+          :"=r" (x)
+          :"m" (*__xg(ptr)), "0" (x)
+          :"memory");
+      break;
+  }
+  return x;
 }
 
 /*
@@ -76,78 +76,78 @@
 #define __HAVE_ARCH_CMPXCHG 1
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-		unsigned long new, int size)
+    unsigned long new, int size)
 {
-	int32_t prev;
-	switch (size) {
-		case 1:
-			__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				    : "=a"(prev)
-				    : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				    : "memory");
-			return prev;
-		case 2:
-			__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				    : "=a"(prev)
-				    : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				    : "memory");
-			return prev;
-		case 4:
-			__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-				    : "=a"(prev)
-				    : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				    : "memory");
-			return prev;
-		case 8:
-			__asm__ __volatile__("lock; cmpxchg %1,%2"
-				    : "=a"(prev)
-				    : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				    : "memory");
-			return prev;
-	}
-	return old;
+  int32_t prev;
+  switch (size) {
+    case 1:
+      __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+            : "=a"(prev)
+            : "q"(new), "m"(*__xg(ptr)), "0"(old)
+            : "memory");
+      return prev;
+    case 2:
+      __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+            : "=a"(prev)
+            : "r"(new), "m"(*__xg(ptr)), "0"(old)
+            : "memory");
+      return prev;
+    case 4:
+      __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+            : "=a"(prev)
+            : "r"(new), "m"(*__xg(ptr)), "0"(old)
+            : "memory");
+      return prev;
+    case 8:
+      __asm__ __volatile__("lock; cmpxchg %1,%2"
+            : "=a"(prev)
+            : "q"(new), "m"(*__xg(ptr)), "0"(old)
+            : "memory");
+      return prev;
+  }
+  return old;
 }
 
 /*
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
+      unsigned long old, unsigned long new, int size)
 {
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__("cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__("cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
+  unsigned long prev;
+  switch (size) {
+  case 1:
+    __asm__ __volatile__("cmpxchgb %b1,%2"
+             : "=a"(prev)
+             : "q"(new), "m"(*__xg(ptr)), "0"(old)
+             : "memory");
+    return prev;
+  case 2:
+    __asm__ __volatile__("cmpxchgw %w1,%2"
+             : "=a"(prev)
+             : "r"(new), "m"(*__xg(ptr)), "0"(old)
+             : "memory");
+    return prev;
+  case 4:
+    __asm__ __volatile__("cmpxchgl %k1,%2"
+             : "=a"(prev)
+             : "r"(new), "m"(*__xg(ptr)), "0"(old)
+             : "memory");
+    return prev;
+  case 8:
+    __asm__ __volatile__("cmpxchgq %1,%2"
+             : "=a"(prev)
+             : "r"(new), "m"(*__xg(ptr)), "0"(old)
+             : "memory");
+    return prev;
+  }
+  return old;
 }*/
 
 #define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
+  ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+          (unsigned long)(n),sizeof(*(ptr))))
 /*#define cmpxchg_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))*/
+  ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+          (unsigned long)(n),sizeof(*(ptr))))*/
 #endif
 
 #endif
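
The xchg() wrapper is still commented out, so anything that needs an atomic
swap has to call __xchg() directly for now. A minimal sketch of a
test-and-set spinlock built that way (hypothetical names, not part of this
patch; assumes x86, where the plain store in the unlock is sufficient):

  /* Spins until the swap observes 0 (unlocked).  The xchg instruction
   * implies a locked bus cycle, so no extra barrier is needed here. */
  static inline void spin_lock(volatile unsigned long *lock) {
    while (__xchg(1, lock, sizeof(*lock)) != 0) {}
  }

  static inline void spin_unlock(volatile unsigned long *lock) {
    *lock = 0;  /* plain store acts as a release on x86's TSO model */
  }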