pub/scm/linux/kernel/git/jirislaby/linux.git
The Linux kernel with my patches
   commit e5e8995a8ffe1b2d8c9b8445b5581116e576127b
   parent c1aa905a304e4b Merge tag 'pm-4.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
     tree 990e06d66214816310fd07e975153f6bc6bd42b4
   author Jiri Slaby <jslaby@suse.cz>  2015-04-16 12:37:35 +0200
committer Jiri Slaby <jslaby@suse.cz>  2017-03-10 19:47:09 +0100

symbolic execution: replace x86 asm with C stubs and add KLEE harnesses

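Make selected kernel subsystems amenable to symbolic execution with KLEE:
replace the x86 inline-assembly primitives (atomics, bitops, irqflags,
get_user/put_user, BUG()) with plain C or KLEE intrinsics, provide a
userspace environment in common.c, and add standalone harnesses
(drivers/tty/main.c, drivers/tty/vt/main.c, fs/btrfs/main.c,
fs/ext4/main.c) that drive the subsystems with symbolic inputs.

All harnesses follow the same basic pattern; a minimal sketch (the klee_*
declarations match those in common.c, code_under_test and the 0xdeadbeef
check are purely illustrative):

    #include <stddef.h>

    /* KLEE intrinsics, as declared in common.c */
    extern void klee_make_symbolic(void *addr, size_t nbytes, const char *name);
    extern int klee_int(const char *name);

    /* illustrative stand-in for the kernel path being explored */
    static int code_under_test(unsigned int cmd)
    {
            return cmd == 0xdeadbeef ? -1 : 0;
    }

    int main(void)
    {
            unsigned int cmd;

            /* unconstrained input: KLEE forks a path per feasible value */
            klee_make_symbolic(&cmd, sizeof(cmd), "cmd");

            /* stubbed predicates return symbolic ints, so both branches
             * get explored */
            if (!klee_int("capable"))
                    return 0;

            return code_under_test(cmd);
    }
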
---
 Makefile                           |   4 +-
 arch/x86/include/asm/asm.h         |   2 +-
 arch/x86/include/asm/atomic.h      |  36 ++--
 arch/x86/include/asm/atomic64_64.h |  34 ++--
 arch/x86/include/asm/bitops.h      | 201 ++++++++++----------
 arch/x86/include/asm/bug.h         |   5 +-
 arch/x86/include/asm/current.h     |   2 +-
 arch/x86/include/asm/irqflags.h    |  21 +--
 arch/x86/include/asm/page.h        |   1 +
 arch/x86/include/asm/preempt.h     |  10 +-
 arch/x86/include/asm/processor.h   |   6 -
 arch/x86/include/asm/uaccess.h     |  43 +----
 common.c                           | 377 +++++++++++++++++++++++++++++++++++++
 drivers/tty/Makefile               |   2 +-
 drivers/tty/main.c                 | 161 ++++++++++++++++
 drivers/tty/n_tty.c                |   2 +-
 drivers/tty/tty_io.c               |   4 +-
 drivers/tty/vt/Makefile            |   2 +-
 drivers/tty/vt/main.c              | 154 +++++++++++++++
 drivers/tty/vt/vt.c                |  11 +-
 fs/btrfs/Makefile                  |   2 +-
 fs/btrfs/disk-io.c                 |   3 +-
 fs/btrfs/main.c                    | 342 +++++++++++++++++++++++++++++++++
 fs/btrfs/raid56.c                  |   4 +-
 fs/btrfs/super.c                   |   6 +-
 fs/btrfs/volumes.c                 |   6 +-
 fs/ext4/Makefile                   |   2 +-
 fs/ext4/main.c                     | 347 ++++++++++++++++++++++++++++++++++
 fs/ext4/super.c                    |  11 +-
 fs/inode.c                         |  11 +-
 include/linux/buffer_head.h        |   2 +-
 include/linux/console.h            |   2 +-
 include/linux/rwlock.h             |  30 ++-
 include/linux/sched/signal.h       |   3 +-
 include/linux/spinlock.h           |  23 +--
 35 files changed, 1599 insertions(+), 273 deletions(-)

diff --git a/Makefile b/Makefile
index 165cf9783a5dbb..fba93e651b0ae8 100644
--- a/Makefile
+++ b/Makefile
@@ -800,8 +800,8 @@ KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
 ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
-	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO1
+	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO1
 endif
 
 include scripts/Makefile.kasan
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7acb51c49fec46..6d4e54387db442 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -46,7 +46,7 @@
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
  */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
+#ifdef __GCC_ASM_FLAG_OUTPUTS__1
 # define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
 # define CC_OUT(c) "=@cc" #c
 #else
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5ea02513..aa64a05ca1e050 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -47,9 +47,7 @@ static __always_inline void atomic_set(atomic_t *v, int i)
  */
 static __always_inline void atomic_add(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
-		     : "+m" (v->counter)
-		     : "ir" (i));
+	v->counter += i;
 }
 
 /**
@@ -61,9 +59,7 @@ static __always_inline void atomic_add(int i, atomic_t *v)
  */
 static __always_inline void atomic_sub(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "subl %1,%0"
-		     : "+m" (v->counter)
-		     : "ir" (i));
+	v->counter -= i;
 }
 
 /**
@@ -77,7 +73,8 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
  */
 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+	v->counter -= i;
+	return v->counter == 0;
 }
 
 /**
@@ -88,8 +85,7 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
  */
 static __always_inline void atomic_inc(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "incl %0"
-		     : "+m" (v->counter));
+	++v->counter;
 }
 
 /**
@@ -100,8 +96,7 @@ static __always_inline void atomic_inc(atomic_t *v)
  */
 static __always_inline void atomic_dec(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "decl %0"
-		     : "+m" (v->counter));
+	--v->counter;
 }
 
 /**
@@ -114,7 +109,7 @@ static __always_inline void atomic_dec(atomic_t *v)
  */
 static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+	return --v->counter == 0;
 }
 
 /**
@@ -127,7 +122,7 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+	return ++v->counter == 0;
 }
 
 /**
@@ -141,7 +136,8 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+	v->counter += i;
+	return v->counter < 0;
 }
 
 /**
@@ -153,7 +149,8 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
  */
 static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
-	return i + xadd(&v->counter, i);
+	v->counter += i;
+	return v->counter;
 }
 
 /**
@@ -183,12 +180,17 @@ static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
 
 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	return cmpxchg(&v->counter, old, new);
+	int mold = v->counter;
+	if (mold == old)
+		v->counter = new;
+	return mold;
 }
 
 static inline int atomic_xchg(atomic_t *v, int new)
 {
-	return xchg(&v->counter, new);
+	int old = v->counter;
+	v->counter = new;
+	return old;
 }
 
 #define ATOMIC_OP(op)							\
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6ae2f76a..b7ed88038116ca 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -56,9 +56,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
  */
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
-	asm volatile(LOCK_PREFIX "subq %1,%0"
-		     : "=m" (v->counter)
-		     : "er" (i), "m" (v->counter));
+	v->counter -= i;
 }
 
 /**
@@ -72,7 +70,8 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+	v->counter -= i;
+	return v->counter == 0;
 }
 
 /**
@@ -83,9 +82,7 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
  */
 static __always_inline void atomic64_inc(atomic64_t *v)
 {
-	asm volatile(LOCK_PREFIX "incq %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+	v->counter++;
 }
 
 /**
@@ -96,9 +93,7 @@ static __always_inline void atomic64_inc(atomic64_t *v)
  */
 static __always_inline void atomic64_dec(atomic64_t *v)
 {
-	asm volatile(LOCK_PREFIX "decq %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+	v->counter--;
 }
 
 /**
@@ -111,7 +106,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  */
 static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+	return --v->counter == 0;
 }
 
 /**
@@ -124,7 +119,7 @@ static inline bool atomic64_dec_and_test(atomic64_t *v)
  */
 static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+	return ++v->counter == 0;
 }
 
 /**
@@ -138,7 +133,8 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
  */
 static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+	v->counter += i;
+	return v->counter < 0;
 }
 
 /**
@@ -150,7 +146,8 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v)
  */
 static __always_inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return i + xadd(&v->counter, i);
+	v->counter += i;
+	return v->counter;
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -173,12 +170,17 @@ static inline long atomic64_fetch_sub(long i, atomic64_t *v)
 
 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	return cmpxchg(&v->counter, old, new);
+	long mold = v->counter;
+	if (mold == old)
+		v->counter = new;
+	return mold;
 }
 
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
-	return xchg(&v->counter, new);
+	long old = v->counter;
+	v->counter = new;
+	return old;
 }
 
 /**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 854022772c5be4..e4c35787fd1f33 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -53,6 +53,23 @@
 #define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -71,29 +88,15 @@
 static __always_inline void
 set_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "orb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr))
-			: "memory");
-	} else {
-		asm volatile(LOCK_PREFIX "bts %1,%0"
-			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
-	}
+	__set_bit(nr, addr);
 }
 
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
 }
 
 /**
@@ -109,15 +112,7 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 static __always_inline void
 clear_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "andb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)~CONST_MASK(nr)));
-	} else {
-		asm volatile(LOCK_PREFIX "btr %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
-	}
+	__clear_bit(nr, addr);
 }
 
 /*
@@ -134,11 +129,6 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 	clear_bit(nr, addr);
 }
 
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
-{
-	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
-}
-
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
 	bool negative;
@@ -181,7 +171,10 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
 }
 
 /**
@@ -195,15 +188,26 @@ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "xorb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr)));
-	} else {
-		asm volatile(LOCK_PREFIX "btc %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
-	}
+	__change_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
 }
 
 /**
@@ -216,7 +220,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
+	return __test_and_set_bit(nr, addr);
 }
 
 /**
@@ -233,23 +237,29 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
  */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	bool oldbit;
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
 
-	asm("bts %2,%1\n\t"
-	    CC_SET(c)
-	    : CC_OUT(c) (oldbit), ADDR
-	    : "Ir" (nr));
-	return oldbit;
+	*p = old & ~mask;
+	return (old & mask) != 0;
 }
 
 /**
@@ -262,47 +272,18 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
  */
 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- *
- * Note: the operation is performed atomically with respect to
- * the local CPU, but not other CPUs. Portable code should not
- * rely on this behaviour.
- * KVM relies on this behaviour on x86 for modifying memory that is also
- * accessed from a hypervisor on the same CPU if running in a VM: don't change
- * this without also updating arch/x86/kernel/kvm.c
- */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
-{
-	bool oldbit;
-
-	asm volatile("btr %2,%1\n\t"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr));
-	return oldbit;
+	return __test_and_clear_bit(nr, addr);
 }
 
 /* WARNING: non atomic and it can be reordered! */
 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	bool oldbit;
-
-	asm volatile("btc %2,%1\n\t"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
 
-	return oldbit;
+	*p = old ^ mask;
+	return (old & mask) != 0;
 }
 
 /**
@@ -315,7 +296,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
  */
 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
+	return __test_and_change_bit(nr, addr);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -326,14 +307,7 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
 
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
-	bool oldbit;
-
-	asm volatile("bt %2,%1\n\t"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit)
-		     : "m" (*(unsigned long *)addr), "Ir" (nr));
-
-	return oldbit;
+	return constant_test_bit(nr, addr);
 }
 
 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
@@ -358,10 +332,33 @@ static bool test_bit(int nr, const volatile unsigned long *addr);
  */
 static __always_inline unsigned long __ffs(unsigned long word)
 {
-	asm("rep; bsf %1,%0"
-		: "=r" (word)
-		: "rm" (word));
-	return word;
+	int num = 0;
+
+#if BITS_PER_LONG == 64
+	if ((word & 0xffffffff) == 0) {
+		num += 32;
+		word >>= 32;
+	}
+#endif
+	if ((word & 0xffff) == 0) {
+		num += 16;
+		word >>= 16;
+	}
+	if ((word & 0xff) == 0) {
+		num += 8;
+		word >>= 8;
+	}
+	if ((word & 0xf) == 0) {
+		num += 4;
+		word >>= 4;
+	}
+	if ((word & 0x3) == 0) {
+		num += 2;
+		word >>= 2;
+	}
+	if ((word & 0x1) == 0)
+		num += 1;
+	return num;
 }
 
 /**
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index ba38ebbaced3cd..06478d83028d9b 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -25,9 +25,12 @@ do {								\
 } while (0)
 
 #else
+extern void __assert_fail(const char *__assertion, const char *__file,
+		unsigned int __line, const char *__function)
+		__attribute__ ((__noreturn__));
 #define BUG()							\
 do {								\
-	asm volatile("ud2");					\
+	__assert_fail("BUG", __FILE__, __LINE__, __PRETTY_FUNCTION__); \
 	unreachable();						\
 } while (0)
 #endif
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 9476c04ee6357e..89a17a4aedaa84 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return this_cpu_read_stable(current_task);
+	return current_task;
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index ac7692dcfa2e86..6caac0e8fce31c 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -14,38 +14,19 @@
 
 static inline unsigned long native_save_fl(void)
 {
-	unsigned long flags;
-
-	/*
-	 * "=rm" is safe here, because "pop" adjusts the stack before
-	 * it evaluates its effective address -- this is part of the
-	 * documented behavior of the "pop" instruction.
-	 */
-	asm volatile("# __raw_save_flags\n\t"
-		     "pushf ; pop %0"
-		     : "=rm" (flags)
-		     : /* no input */
-		     : "memory");
-
-	return flags;
+	return 0;
 }
 
 static inline void native_restore_fl(unsigned long flags)
 {
-	asm volatile("push %0 ; popf"
-		     : /* no output */
-		     :"g" (flags)
-		     :"memory", "cc");
 }
 
 static inline void native_irq_disable(void)
 {
-	asm volatile("cli": : :"memory");
 }
 
 static inline void native_irq_enable(void)
 {
-	asm volatile("sti": : :"memory");
 }
 
 static inline __cpuidle void native_safe_halt(void)
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index cf8f619b305fd8..8a287067796517 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -76,6 +76,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
 #include <asm-generic/getorder.h>
 
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define WANT_PAGE_VIRTUAL 1
 
 #endif	/* __KERNEL__ */
 #endif /* _ASM_X86_PAGE_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index ec1f3c6511506e..12178bf1b66ee1 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -19,18 +19,12 @@ DECLARE_PER_CPU(int, __preempt_count);
  */
 static __always_inline int preempt_count(void)
 {
-	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+	return __preempt_count & ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void preempt_count_set(int pc)
 {
-	int old, new;
-
-	do {
-		old = raw_cpu_read_4(__preempt_count);
-		new = (old & PREEMPT_NEED_RESCHED) |
-			(pc & ~PREEMPT_NEED_RESCHED);
-	} while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
+	__preempt_count = pc;
 }
 
 /*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f385eca5407a0f..b448fdb6177ee9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -768,9 +768,6 @@ extern char			ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
-			  X86_FEATURE_XMM,
-			  "m" (*(const char *)x));
 }
 
 /*
@@ -780,9 +777,6 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH, "prefetchw %P1",
-			  X86_FEATURE_3DNOWPREFETCH,
-			  "m" (*(const char *)x));
 }
 
 static inline void spin_lock_prefetch(const void *x)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ea148313570f6e..360e00e03fd336 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -162,18 +162,15 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
  * Clang/LLVM cares about the size of the register, but still wants
  * the base register for something that ends up being a pair.
  */
+extern void klee_make_symbolic(void *addr, size_t nbytes, const char *name);
 #define get_user(x, ptr)						\
 ({									\
-	int __ret_gu;							\
-	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
-	register void *__sp asm(_ASM_SP);				\
-	__chk_user_ptr(ptr);						\
-	might_fault();							\
-	asm volatile("call __get_user_%P4"				\
-		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
-		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
-	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
-	__builtin_expect(__ret_gu, 0);					\
+ 	__typeof__(*(ptr)) __yyy; \
+ 	klee_make_symbolic(&(__yyy), sizeof(__yyy), "get_user");	\
+ 	(x) = __yyy; \
+	if (0)								\
+		(x) = (__force __typeof__(*(ptr))) *ptr;		\
+ 	0; \
 })
 
 #define __put_user_x(size, x, ptr, __ret_pu)			\
@@ -247,29 +244,9 @@ extern void __put_user_8(void);
  */
 #define put_user(x, ptr)					\
 ({								\
-	int __ret_pu;						\
-	__typeof__(*(ptr)) __pu_val;				\
-	__chk_user_ptr(ptr);					\
-	might_fault();						\
-	__pu_val = x;						\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
-		break;						\
-	case 2:							\
-		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
-		break;						\
-	case 4:							\
-		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
-		break;						\
-	case 8:							\
-		__put_user_x8(__pu_val, ptr, __ret_pu);		\
-		break;						\
-	default:						\
-		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
-		break;						\
-	}							\
-	__builtin_expect(__ret_pu, 0);				\
+ 	if (0)							\
+	*(ptr) = (__force __typeof__(*(ptr)))(x);		\
+ 	0; \
 })
 
 #define __put_user_size(x, ptr, size, retval, errret)			\
diff --git a/common.c b/common.c
new file mode 100644
index 00000000000000..9b8e2a96ff96e9
--- /dev/null
+++ b/common.c
@@ -0,0 +1,377 @@
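+/*
+ * Userspace stand-ins for the kernel environment, shared by all of the
+ * KLEE harnesses (each main.c includes this file directly): kernel
+ * allocators are backed by malloc()/free(), locking, scheduling and
+ * workqueue primitives are no-ops, and user copies and "don't care"
+ * return values are made symbolic through the KLEE intrinsics declared
+ * below.
+ */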
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slub_def.h>
+#include <linux/workqueue.h>
+
+unsigned my_make_symbolic;
+const char *my_make_symbolic_str;
+
+extern void *malloc(size_t size);
+extern void free(void *ptr);
+
+extern   __attribute__((noreturn))
+void klee_report_error(const char *file, int line, const char *message,
+		const char *suffix);
+extern void klee_warning(const char *message);
+extern int klee_int(const char *name);
+extern void klee_make_symbolic(void *addr, size_t nbytes, const char *name);
+extern void klee_assume(unsigned long condition);
+
+/**************************************/
+
+#include <linux/sched.h>
+struct signal_struct curr_sig;
+struct thread_info tinfo;
+struct task_struct curr = {
+	.signal = &curr_sig,
+	.stack = &tinfo,
+};
+struct task_struct *current_task = &curr;
+
+/* percpu */
+struct pt_regs *irq_regs;
+
+int __preempt_count;
+struct boot_params boot_params;
+atomic_t system_freezing_cnt;
+unsigned long kernel_stack;
+struct workqueue_struct *system_unbound_wq;
+const struct sysfs_ops kobj_sysfs_ops;
+struct kobject *fs_kobj;
+struct workqueue_struct* system_wq;
+volatile unsigned long jiffies;
+int oops_in_progress;
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
+int suid_dumpable = 0;
+unsigned long phys_base;
+
+#include <linux/user_namespace.h>
+struct user_namespace init_user_ns;
+#if 0
+#include <linux/init_task.h>
+struct pid init_struct_pid = INIT_STRUCT_PID;
+#include <linux/fdtable.h>
+struct files_struct init_files;
+#endif
+#include <linux/sched.h>
+struct task_struct init_task; // = INIT_TASK(init_task);
+struct user_struct root_user;
+
+struct cpuinfo_x86 boot_cpu_data;
+struct pglist_data contig_page_data;
+struct mem_section *mem_section[NR_SECTION_ROOTS];
+//const struct cpumask * const cpu_possible_mask;
+int hashdist;
+struct tss_struct cpu_tss;
+
+/**************************************/
+
+char *strreplace(char *s, char old, char new)
+{
+	for (; *s; ++s)
+		if (*s == old)
+			*s = new;
+	return s;
+}
+
+size_t strlen(const char *s)
+{
+	const char *sc;
+
+	for (sc = s; *sc != '\0'; ++sc)
+		/* nothing */;
+	return sc - s;
+}
+
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = strlen(src);
+
+	if (size) {
+		size_t len = (ret >= size) ? size - 1 : ret;
+		memcpy(dest, src, len);
+		dest[len] = '\0';
+	}
+	return ret;
+}
+
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+{
+#ifdef BETTER_USER
+	memcpy(to, from, n);
+	return klee_int("_copy_from_user");
+#else
+	char buf[n];
+	klee_make_symbolic(buf, n, "_copy_from_user");
+	memcpy(to, buf, n);
+	return 0;
+#endif
+}
+
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+{
+#ifdef BETTER_USER
+	memcpy(to, from, n);
+	return klee_int("_copy_to_user");
+#else
+	return 0;
+#endif
+}
+
+char *kstrdup(const char *s, gfp_t gfp)
+{
+	size_t len = strlen(s) + 1;
+	char *ret = malloc(len);
+	if (ret)
+		memcpy(ret, s, len);
+	return ret;
+}
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+	void *ret = malloc(size);
+
+	if (ret && flags & __GFP_ZERO)
+		memset(ret, 0, size);
+
+/*	if (ret && my_make_symbolic--)
+		klee_make_symbolic(ret, size, my_make_symbolic_str);*/
+
+	return ret;
+}
+
+void kfree(const void *x)
+{
+	void *a = (void *)x;
+	free(a);
+}
+
+void *vmalloc(unsigned long size)
+{
+	return __kmalloc(size, GFP_KERNEL);
+}
+
+void vfree(const void *x)
+{
+	kfree(x);
+}
+
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t size, size_t align,
+		  unsigned long flags, void (*ctor)(void *))
+{
+	struct kmem_cache *ret;
+
+	ret = malloc(sizeof(*ret));
+	if (ret) {
+		ret->object_size = size;
+		ret->ctor = ctor;
+	}
+
+	return ret;
+}
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	void *ret;
+
+	if (!cachep)
+		klee_report_error(__FILE__, __LINE__, "cachep is NULL",
+				"kmem.err");
+
+	ret = malloc(cachep->object_size);
+	if (ret) {
+		if (flags & __GFP_ZERO)
+			memset(ret, 0, cachep->object_size);
+
+		if (cachep->ctor)
+			cachep->ctor(ret);
+	}
+
+	return ret;
+}
+
+void kmem_cache_free(struct kmem_cache *s, void *x)
+{
+	free(x);
+}
+
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+{
+	return (unsigned long)malloc(4096 * (1 << order));
+}
+
+int printk(const char *fmt, ...)
+{
+//	klee_warning(fmt);
+	return 0;
+}
+
+void __pr_emerg(const char *fmt, ...) {}
+void __pr_alert(const char *fmt, ...) {}
+void __pr_crit(const char *fmt, ...) {}
+void __pr_err(const char *fmt, ...) {}
+void __pr_warn(const char *fmt, ...) {}
+void __pr_notice(const char *fmt, ...) {}
+void __pr_info(const char *fmt, ...) {}
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+	klee_report_error(file, line, fmt, "warn");
+}
+
+void warn_slowpath_null(const char *file, int line)
+{
+	klee_report_error(file, line, "warn_slowpath_null", "warn");
+}
+
+#include <linux/ratelimit.h>
+int ___ratelimit(struct ratelimit_state *rs, const char *func)
+{
+	return 0;
+}
+
+void __mutex_init(struct mutex *lock, const char *name,
+		struct lock_class_key *key)
+{
+}
+
+void mutex_lock(struct mutex *lock)
+{
+}
+
+int mutex_lock_interruptible(struct mutex *lock)
+{
+	return klee_int("mutex_lock_interruptible");
+}
+
+int mutex_trylock(struct mutex *lock)
+{
+	return klee_int("mutex_trylock");
+}
+
+void mutex_unlock(struct mutex *lock)
+{
+}
+
+void down_read(struct rw_semaphore *sem)
+{
+}
+
+void up_read(struct rw_semaphore *sem)
+{
+}
+
+void down_write(struct rw_semaphore *sem)
+{
+}
+
+void up_write(struct rw_semaphore *sem)
+{
+}
+
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+}
+
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name,
+		struct lock_class_key *key)
+{
+}
+
+bool queue_work_on(int cpu, struct workqueue_struct *wq,
+		   struct work_struct *work)
+{
+	return klee_int("queue_work_on");
+}
+
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
+{
+}
+
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+	return 0;
+}
+
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+}
+
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+}
+
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+}
+
+unsigned long get_seconds(void)
+{
+	return 10000;
+}
+
+void schedule(void)
+{
+}
+
+signed long schedule_timeout(signed long timeout)
+{
+	long ret = klee_int("schedule_timeout");
+	return ret < 0 ? -ret : ret;
+}
+
+bool capable(int cap)
+{
+	return !!klee_int("capable");
+}
+
+pid_t pid_vnr(struct pid *pid) 
+{
+	return klee_int("pid_vnr");
+}
+
+void put_pid(struct pid *pid)
+{
+}
+
+void kill_fasync(struct fasync_struct **fp, int sig, int band)
+{
+}
+
+unsigned long msleep_interruptible(unsigned int msecs)
+{
+	return 0;
+}
+
+void msleep(unsigned int msecs)
+{
+}
+
+int mod_timer(struct timer_list *timer, unsigned long expires)
+{
+	return 0;
+}
+
+int del_timer(struct timer_list *timer)
+{
+	return 0;
+}
+
+int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+			       unsigned long val, void *v)
+{
+	return NOTIFY_DONE;
+}
+
+int input_handler_for_each_handle(struct input_handler *handler, void *data,
+				  int (*fn)(struct input_handle *, void *))
+{
+	return 0;
+}
+
+void __tasklet_schedule(struct tasklet_struct *t)
+{
+}
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index b95bed92da9f05..d86a2fd57f7e23 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_TTY)		+= tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
-				   tty_buffer.o tty_port.o tty_mutex.o tty_ldsem.o
+				   tty_buffer.o tty_port.o tty_mutex.o tty_ldsem.o main.o
 obj-$(CONFIG_LEGACY_PTYS)	+= pty.o
 obj-$(CONFIG_UNIX98_PTYS)	+= pty.o
 obj-$(CONFIG_AUDIT)		+= tty_audit.o
diff --git a/drivers/tty/main.c b/drivers/tty/main.c
new file mode 100644
index 00000000000000..e14b46957a5f5b
--- /dev/null
+++ b/drivers/tty/main.c
@@ -0,0 +1,161 @@
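+/*
+ * KLEE harness for the tty core: allocates a fake tty driver and tty,
+ * installs the N_TTY line discipline, makes the termios flags and a
+ * receive buffer symbolic, and pushes the buffer through
+ * tty_ldisc_receive_buf().
+ */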
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+#include "../../common.c"
+
+struct console *console_drivers;
+
+#include <linux/keyboard.h>
+ushort *key_maps[MAX_NR_KEYMAPS];
+unsigned int accent_table_size;
+char *func_table[MAX_NR_FUNC];
+char func_buf[] = { };
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+unsigned int keymap_count = 7;
+int funcbufleft = 0;
+
+struct device *class_find_device(struct class *class, struct device *start,
+		const void *data, int (*match)(struct device *, const void *))
+{
+	return NULL;
+}
+
+/* ================== */
+static struct tty_driver *tty_drv;
+
+static struct tty_struct *alloc_tty(const char *name)
+{
+	struct tty_port *port;
+	struct tty_struct *tty;
+
+	my_make_symbolic = 1;
+	my_make_symbolic_str = name;
+	tty = alloc_tty_struct(tty_drv, 0);
+
+	port = malloc(sizeof(*port));
+	tty_port_init(port);
+
+	tty->port = port;
+	tty->ops = tty_drv->ops;
+
+	return tty;
+}
+
+static int write(struct tty_struct * tty,
+		      const unsigned char *buf, int count)
+
+{
+	return count;
+}
+
+static int break_ctl(struct tty_struct *tty, int state)
+{
+	return klee_int("break_ctl");
+}
+
+static int chars_in_buffer(struct tty_struct *tty)
+{
+	return klee_int("chars_in_buffer");
+}
+
+#define TTY_LINES 1
+
+static int prepare_driver(void)
+{
+	static struct tty_operations tty_ops = {
+		.write = write,
+		.break_ctl = break_ctl,
+		.chars_in_buffer = chars_in_buffer,
+	};
+
+	my_make_symbolic = 1;
+	my_make_symbolic_str = "tty_driver";
+	tty_drv = tty_alloc_driver(TTY_LINES, 0);
+	if (IS_ERR(tty_drv)) {
+		klee_warning("no driver\n");
+		printf("err=%ld\n", PTR_ERR(tty_drv));
+		return -ENOMEM;
+	}
+	tty_set_operations(tty_drv, &tty_ops);
+
+	tty_drv->name = "ktd";
+	tty_drv->name_base = 0;
+	if (tty_drv->type == TTY_DRIVER_TYPE_PTY)
+		return -EINVAL;
+	/* fooling :) */
+	tty_drv->flags |= TTY_DRIVER_INSTALLED;
+
+	return 0;
+}
+
+static int prepare(void)
+{
+	int ret;
+
+	n_tty_init();
+
+	ret = prepare_driver();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+extern ssize_t tty_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos);
+
+int main()
+{
+	struct tty_file_private tfp;
+	struct file_operations fop;
+	struct inode inode;
+	struct file file = {
+		.f_op = &fop,
+		.f_inode = &inode,
+		.private_data = &tfp,
+	};
+	struct tty_struct *tty;
+	unsigned int cmd;
+	unsigned long arg;
+	loff_t pos = 0;
+
+	if (prepare())
+		return 0;
+
+	klee_make_symbolic(&cmd, sizeof(cmd), "cmd");
+	if (0 && klee_int("pointer_arg")) {
+#define ARG_SIZE 1024
+		void *ptr = malloc(ARG_SIZE);
+		klee_make_symbolic(ptr, ARG_SIZE, "arg");
+		arg = (unsigned long)ptr;
+	} else
+		klee_make_symbolic(&arg, sizeof(arg), "arg");
+
+	tty = alloc_tty("tty");
+	tfp.tty = tty;
+
+	if (1 || klee_int("curr->signal->tty==tty"))
+		curr_sig.tty = tty;
+
+	my_make_symbolic = 1;
+	my_make_symbolic_str = "n_tty_data";
+	tty_ldisc_setup(tty, NULL);
+
+//	klee_warning("doing ioctl:\n");
+//	tty_ioctl(&file, cmd, arg);
+#define BUF_SIZE 256
+	void *buf = malloc(BUF_SIZE);
+	klee_make_symbolic(buf, BUF_SIZE, "buf");
+	void *flg = malloc(BUF_SIZE);
+	klee_make_symbolic(flg, BUF_SIZE, "flg");
+
+	tty->termios.c_oflag = klee_int("oflag");
+	tty->termios.c_iflag = klee_int("iflag");
+	tty->termios.c_cflag = klee_int("cflag");
+	tty->termios.c_lflag = klee_int("lflag");
+	//tty_write(&file, NULL, BUF_SIZE, &pos);
+	tty->ldisc->ops->set_termios(tty, NULL);
+	tty_ldisc_receive_buf(tty->ldisc, buf, flg, BUF_SIZE);
+	return 0;
+}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index bdf0e6e8999148..43c8334f8b4bd1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2438,7 +2438,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
 	}
 }
 
-static struct tty_ldisc_ops n_tty_ops = {
+struct tty_ldisc_ops n_tty_ops = {
 	.magic           = TTY_LDISC_MAGIC,
 	.name            = "n_tty",
 	.open            = n_tty_open,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index e6d1a6510886c5..8b346161325124 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -140,7 +140,7 @@ LIST_HEAD(tty_drivers);			/* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
 ssize_t redirected_tty_write(struct file *, const char __user *,
 							size_t, loff_t *);
 static unsigned int tty_poll(struct file *, poll_table *);
@@ -1234,7 +1234,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  *	write method will not be invoked in parallel for each device.
  */
 
-static ssize_t tty_write(struct file *file, const char __user *buf,
+ssize_t tty_write(struct file *file, const char __user *buf,
 						size_t count, loff_t *ppos)
 {
 	struct tty_struct *tty = file_tty(file);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index 17ae94cb29f8e7..8c0a377b4d00cb 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -4,7 +4,7 @@
 FONTMAPFILE = cp437.uni
 
 obj-$(CONFIG_VT)			+= vt_ioctl.o vc_screen.o \
-					   selection.o keyboard.o
+					   selection.o keyboard.o main.o
 obj-$(CONFIG_CONSOLE_TRANSLATIONS)	+= consolemap.o consolemap_deftbl.o
 obj-$(CONFIG_HW_CONSOLE)		+= vt.o defkeymap.o
 
diff --git a/drivers/tty/vt/main.c b/drivers/tty/vt/main.c
new file mode 100644
index 00000000000000..e7933a090cd6bc
--- /dev/null
+++ b/drivers/tty/vt/main.c
@@ -0,0 +1,154 @@
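+/*
+ * KLEE harness for the VT console: builds a minimal vc_data backed by
+ * stubbed consw operations, constrains a symbolic buffer to an
+ * ESC '[' control sequence, and feeds it byte by byte to do_con_trol()
+ * to explore the escape-sequence state machine.
+ */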
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/kd.h>
+#include <linux/console_struct.h>
+#include <linux/kernel.h>
+
+#include "../../../common.c"
+
+#include <linux/keyboard.h>
+ushort *key_maps[MAX_NR_KEYMAPS];
+unsigned int accent_table_size;
+
+/**************************************/
+
+void console_lock(void)
+{
+}
+
+void console_unlock(void)
+{
+}
+
+void register_console(struct console *newcon)
+{
+}
+
+void __sched console_conditional_schedule(void)
+{
+}
+
+extern int __init con_init(void);
+extern int do_con_write(struct tty_struct *tty, const unsigned char *buf,
+		int count);
+extern void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c);
+
+#define BUF_SIZE 1024
+
+static void con_cursor(struct vc_data *c, int mode)
+{
+}
+
+static const char *con_startup(void)
+{
+    return "my_con";
+}
+
+static void my_con_init(struct vc_data *vc, int init)
+{
+    vc->vc_can_do_color = klee_int("vc_can_do_color");
+    if (init) {
+	vc->vc_cols = 80;
+	vc->vc_rows = 25;
+    } else
+	vc_resize(vc, 80, 25);
+}
+
+static int set_palette(struct vc_data *vc, const unsigned char *table)
+{
+	return 0;
+}
+
+static void con_putcs(struct vc_data *vc, const unsigned short *s,
+			int count, int ypos, int xpos)
+{
+}
+
+static int con_switch(struct vc_data *c)
+{
+	return 1;
+}
+
+static int con_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+{
+	return 0;
+}
+
+static void con_clear(struct vc_data *c, int y, int x, int height, int width)
+{
+}
+
+int main(void)
+{
+	struct consw sw = {
+		.con_init = my_con_init,
+		.con_cursor = con_cursor,
+		.con_startup = con_startup,
+		.con_set_palette = set_palette,
+		.con_putcs = con_putcs,
+		.con_switch = con_switch,
+		.con_scroll = con_scroll,
+		.con_clear = con_clear,
+	};
+	struct tty_struct tty = {
+	};
+	struct vc_data *master_display_fg;
+	struct vc_data vc = {
+		.vc_cols = 80,
+		.vc_rows = 25,
+		.vc_sw = &sw,
+		.vc_display_fg = &master_display_fg,
+	};
+	unsigned a, b;
+	char *buf;
+
+	master_display_fg = &vc;
+	vc.vc_size_row = vc.vc_cols << 1;
+	vc.vc_screenbuf_size = vc.vc_rows * vc.vc_size_row;
+	vc.vc_screenbuf = malloc(vc.vc_screenbuf_size);
+	vc.vc_origin = (unsigned long)vc.vc_screenbuf;
+	vc.vc_visible_origin = vc.vc_origin;
+	vc.vc_scr_end = vc.vc_origin + vc.vc_screenbuf_size;
+	vc.vc_pos = vc.vc_origin + vc.vc_size_row * vc.vc_y + 2 * vc.vc_x;
+
+	current_task = malloc(sizeof(struct task_struct));
+	memset(current_task, 0, sizeof(struct task_struct));
+	current_task->pid = 1; /* let it be init :) */
+
+	conswitchp = &sw;
+/*	con_init();
+	tty.driver_data = vc_cons[0].d;
+	tty.port = &vc_cons[0].d->port;*/
+	tty.port = &vc.port;
+	tty_port_init(tty.port);
+
+	//for (a = 20; a < 21; a++) 
+	a = 10;
+	{
+		buf = malloc(a);
+		klee_make_symbolic(buf, a, "buf");
+		klee_assume(buf[0] == 27);
+		klee_assume(buf[1] == '[');
+		for (b = 2; b < a; b++) {
+			if (buf[b] != 0 && (buf[b] < '0' || buf[b] >= 127))
+				return 0;
+/*			klee_assume(buf[b] == 0 ||
+					(buf[b] >= '0' && buf[b] < 127));*/
+		}
+		int had_zero = 0;
+		for (b = 2; b < a; b++) {
+			if (buf[b] != 0 && had_zero)
+				return 0;
+			if (buf[b] == 0)
+				had_zero = 1;
+		}
+//		do_con_write(&tty, buf, a);
+
+		for (b = 0; b < a; b++)
+			do_con_trol(&tty, &vc, buf[b]);
+
+		free(buf);
+	}
+
+	return 0;
+}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5c4933bb4b5336..4e0364e9553c78 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1405,6 +1405,7 @@ static void csi_m(struct vc_data *vc)
 			break;
 		case 38:
 			i = vc_t416_color(vc, i, rgb_foreground);
+			printf("I was here\n");
 			break;
 		case 48:
 			i = vc_t416_color(vc, i, rgb_background);
@@ -1437,10 +1438,10 @@ static void csi_m(struct vc_data *vc)
 static void respond_string(const char *p, struct tty_port *port)
 {
 	while (*p) {
-		tty_insert_flip_char(port, *p, 0);
+//		tty_insert_flip_char(port, *p, 0);
 		p++;
 	}
-	tty_schedule_flip(port);
+//	tty_schedule_flip(port);
 }
 
 static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
@@ -1745,7 +1746,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
 }
 
 /* console_lock is held */
-static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
 	/*
 	 *  Control characters can be used in the _middle_
@@ -2184,7 +2185,7 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from,
 }
 
 /* acquires console_lock */
-static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
+int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 	int c, tc, ok, n = 0, draw_x = -1;
 	unsigned int currcons;
@@ -2965,7 +2966,7 @@ static void vc_init(struct vc_data *vc, unsigned int rows,
  * the appropriate escape-sequence.
  */
 
-static int __init con_init(void)
+int __init con_init(void)
 {
 	const char *display_desc = NULL;
 	struct vc_data *vc;
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 128ce17a80b0fd..446edcf5928e17 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -9,7 +9,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
-	   uuid-tree.o props.o hash.o free-space-tree.o
+	   uuid-tree.o props.o hash.o free-space-tree.o main.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d05f..d4d088a3cb7654 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -267,7 +267,8 @@ out:
 
 u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
 {
-	return btrfs_crc32c(seed, data, len);
+	extern int klee_int(const char *name);
+	return klee_int(__func__);//btrfs_crc32c(seed, data, len);
 }
 
 void btrfs_csum_final(u32 crc, u8 *result)
diff --git a/fs/btrfs/main.c b/fs/btrfs/main.c
new file mode 100644
index 00000000000000..ebc7c921d90b33
--- /dev/null
+++ b/fs/btrfs/main.c
@@ -0,0 +1,342 @@
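+/*
+ * KLEE harness for btrfs: the block device and page cache are stubbed
+ * so that __bread_gfp() returns symbolic block contents, and
+ * btrfs_fs_type.mount() is invoked on a fake /dev/sdb with symbolic
+ * mount flags.
+ */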
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/raid/pq.h>
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "volumes.h"
+#include "btrfs_inode.h"
+
+#include "../../common.c"
+
+struct backing_dev_info default_backing_dev_info;
+const struct file_operations simple_dir_operations;
+struct raid6_calls raid6_call;
+const struct file_operations pipefifo_fops;
+const struct file_operations def_chr_fops;
+const struct file_operations def_blk_fops;
+const struct file_operations bad_sock_fops;
+
+struct backing_dev_info noop_backing_dev_info = {
+	.name           = "noop",
+	.capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+int sysctl_vfs_cache_pressure = 100;
+
+void (*raid6_2data_recov)(int, size_t, int, int, void **);
+void (*raid6_datap_recov)(int, size_t, int, void **);
+
+struct buffer_head *
+__bread_gfp(struct block_device *bdev, sector_t block,
+		                   unsigned size, gfp_t gfp)
+{
+	static struct buffer_head *blocks[100];
+	struct buffer_head *bh;
+
+	if (blocks[block]) {
+		klee_warning("block returned");
+		return blocks[block];
+	}
+	klee_warning("block alloc");
+
+	bh = malloc(sizeof(*bh));
+	if (bh) {
+		char buf[30];
+		bh->b_blocknr = block;
+		bh->b_size = size;
+		bh->b_bdev = bdev;
+		bh->b_data = malloc(size);
+		sprintf(buf, "bdata%lu", block);
+		klee_make_symbolic(bh->b_data, size, buf);
+		blocks[block] = bh;
+	}
+
+	return bh;
+}
+
+void __brelse(struct buffer_head * buf)
+{
+	if (atomic_read(&buf->b_count)) {
+		put_bh(buf);
+		return;
+	}
+}
+
+void get_filesystem(struct file_system_type *fs)
+{
+}
+
+int register_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+/**************************************/
+
+int init_srcu_struct(struct srcu_struct *sp)
+{
+	return 0;
+}
+
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+}
+
+void wait_rcu_gp(call_rcu_func_t crf)
+{
+}
+
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
+{
+	return 0;
+}
+
+void invalidate_bdev(struct block_device *bdev)
+{
+}
+
+void inode_wait_for_writeback(struct inode *inode)
+{
+}
+
+void truncate_inode_pages_final(struct address_space *mapping)
+{
+}
+
+void mark_page_accessed(struct page *page)
+{
+}
+
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
+{
+	return malloc(1);
+}
+
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+					       unsigned int flags,
+					       int max_active,
+					       struct lock_class_key *key,
+					       const char *lock_name, ...)
+{
+	return malloc(1);
+}
+
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+	free(wq);
+}
+
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+	int fgp_flags, gfp_t gfp_mask)
+{
+	struct page *pg = malloc(sizeof(*pg));
+	void *pg_data = malloc(4096);
+	memset(pg, 0, sizeof(*pg));
+	pg->virtual = pg_data;
+	return pg;
+}
+
+void unlock_page(struct page *page)
+{
+	clear_bit_unlock(PG_locked, &page->flags);
+}
+
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					void *holder)
+{
+	static struct inode bd_inode;
+	static struct request_queue queue;
+	static struct gendisk bd_disk = {
+		.queue = &queue,
+	};
+	static struct block_device bdev = {
+		.bd_inode = &bd_inode,
+		.bd_disk = &bd_disk,
+	};
+	klee_make_symbolic(&bd_inode, sizeof(bd_inode), "bd_inode");
+	bd_inode.i_bdev = &bdev;
+	bd_inode.i_mapping = (struct address_space *)&bd_inode;
+
+	return &bdev;
+}
+
+void blkdev_put(struct block_device *bdev, fmode_t mode)
+{
+}
+
+int bdev_read_only(struct block_device *bdev) 
+{
+	return klee_int(__func__);
+}
+
+const char *bdevname(struct block_device *bdev, char *buf)
+{
+	strcpy(buf, "sdb");
+	return buf;
+}
+
+
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	struct page *page = malloc(sizeof(*page));
+	struct inode *inode = (struct inode *)mapping;
+
+	page->virtual = __bread_gfp(inode->i_bdev, index, 4096, 0)->b_data;
+
+	return page;
+}
+
+void put_page(struct page *page)
+{
+	free(page);
+}
+
+int filemap_write_and_wait(struct address_space *mapping)
+{
+	/* TODO */
+	klee_warning("TODO filemap_write_and_wait");
+	return 0;
+}
+
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+				loff_t end)
+{
+	/* TODO */
+//	klee_warning("TODO filemap_fdatawrite_range");
+	return 0;
+}
+
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+			    loff_t end_byte)
+{
+	/* TODO */
+//	klee_warning("TODO filemap_fdatawait_range");
+	return 0;
+}
+
+void wake_up_bit(void *word, int bit)
+{
+}
+
+int set_blocksize(struct block_device *bdev, int size)
+{
+	/* Size must be a power of two, and between 512 and PAGE_SIZE */
+	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
+		return -EINVAL;
+
+	/* Size cannot be smaller than the size supported by the device */
+//	if (size < bdev_logical_block_size(bdev))
+//		return -EINVAL;
+
+	/* Don't change the size if it is same as current */
+	if (bdev->bd_block_size != size) {
+//		sync_blockdev(bdev);
+		bdev->bd_block_size = size;
+		bdev->bd_inode->i_blkbits = blksize_bits(size);
+//		kill_bdev(bdev);
+	}
+	return 0;
+}
+
+/**************************************/
+
+extern void btrfs_init_compress(void);
+extern int btrfs_init_cachep(void);
+extern int extent_io_init(void);
+extern int extent_map_init(void);
+extern int ordered_data_init(void);
+extern int btrfs_delayed_inode_init(void);
+extern int btrfs_auto_defrag_init(void);
+extern int btrfs_delayed_ref_init(void);
+extern int btrfs_prelim_ref_init(void);
+extern int btrfs_end_io_wq_init(void);
+
+extern int device_list_add(const char *path,
+		struct btrfs_super_block *disk_super, u64 devid,
+		struct btrfs_fs_devices **fs_devices_ret);
+
+extern int btrfs_fill_super(struct super_block *sb,
+			    struct btrfs_fs_devices *fs_devices,
+			    void *data, int silent);
+
+
+#if 0
+static char super_copy[4096];
+static char super_for_commit[4096];
+static struct btrfs_fs_info bfi = {
+	.super_copy = (void *)super_copy,
+	.super_for_commit = (void *)super_for_commit,
+};
+static struct super_block sb = {
+	.s_fs_info = &bfi,
+};
+static struct btrfs_super_block *disk_super;
+#endif
+extern struct file_system_type btrfs_fs_type;
+
+//static char data[4096];
+
+int main(void)
+{
+	unsigned int a;
+	int flags;
+#if 0
+	struct btrfs_fs_devices *fs_dev;
+	u64 devid;
+	u64 total_devices;
+	int ret;
+
+	typeof(sb.s_flags) flags;
+
+	klee_make_symbolic(&flags, sizeof(flags), "sb.s_flags");
+	sb.s_flags = flags;
+	INIT_LIST_HEAD(&sb.s_inodes);
+#endif
+//	klee_make_symbolic(data, sizeof(data), "data");
+	klee_make_symbolic(&flags, sizeof(flags), "flags");
+
+	current_task = malloc(sizeof(struct task_struct));
+	memset(current_task, 0, sizeof(struct task_struct));
+	current_task->pid = 1; /* let it be init :) */
+
+	for (a = 0; a < SECTIONS_PER_ROOT; a++) {
+		mem_section[a] = malloc(sizeof(struct mem_section));
+		memset(mem_section[a], 0, sizeof(struct mem_section));
+	}
+
+	idr_init_cache();
+	radix_tree_init();
+	inode_init();
+
+	btrfs_init_compress();
+	btrfs_init_cachep();
+	extent_io_init();
+	extent_map_init();
+	ordered_data_init();
+	btrfs_delayed_inode_init();
+	btrfs_auto_defrag_init();
+	btrfs_delayed_ref_init();
+	btrfs_prelim_ref_init();
+	btrfs_end_io_wq_init();
+#if 0
+	disk_super = __bread_gfp(&bdev, 16, 4096, 0)->b_data;
+
+	devid = btrfs_stack_device_id(&disk_super->dev_item);
+	total_devices = btrfs_super_num_devices(disk_super);
+
+	ret = device_list_add("/dev/sdb", disk_super, devid, &fs_dev);
+	if (!ret)
+		fs_dev->total_devices = total_devices;
+
+	fs_dev->latest_bdev = &bdev;
+	bfi.fs_devices = fs_dev;
+
+	btrfs_fill_super(&sb, fs_dev, NULL, 0);
+#endif
+	btrfs_fs_type.mount(&btrfs_fs_type, flags, "/dev/sdb", NULL); // data
+
+	return 0;
+}
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 1571bf26dc077a..94145323c57765 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -237,7 +237,9 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 		init_waitqueue_head(&cur->wait);
 	}
 
-	x = cmpxchg(&info->stripe_hash_table, NULL, table);
+	x = info->stripe_hash_table;
+	if (info->stripe_hash_table == NULL)
+		info->stripe_hash_table = table;
 	if (x)
 		kvfree(x);
 	return 0;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc79cce61..80ddb1b489091e 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -65,7 +65,7 @@
 #include <trace/events/btrfs.h>
 
 static const struct super_operations btrfs_super_ops;
-static struct file_system_type btrfs_fs_type;
+struct file_system_type btrfs_fs_type;
 
 static int btrfs_remount(struct super_block *sb, int *flags, char *data);
 
@@ -1112,7 +1112,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
 	return 0;
 }
 
-static int btrfs_fill_super(struct super_block *sb,
+int btrfs_fill_super(struct super_block *sb,
 			    struct btrfs_fs_devices *fs_devices,
 			    void *data)
 {
@@ -2170,7 +2170,7 @@ static void btrfs_kill_super(struct super_block *sb)
 	free_fs_info(fs_info);
 }
 
-static struct file_system_type btrfs_fs_type = {
+struct file_system_type btrfs_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "btrfs",
 	.mount		= btrfs_mount,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73d56eef5e60f3..179e4475b39ff6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -259,8 +259,8 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
 	struct btrfs_device *dev;
 
 	list_for_each_entry(dev, head, dev_list) {
-		if (dev->devid == devid &&
-		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
+		if (dev->devid == devid) {/* &&
+		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {*/
 			return dev;
 		}
 	}
@@ -600,7 +600,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev)
  * 0   - device already known
  * < 0 - error
  */
-static noinline int device_list_add(const char *path,
+noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f3490c3c..1c9a9683650cd0 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,7 @@ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
 		mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-		xattr_trusted.o inline.o readpage.o sysfs.o
+		xattr_trusted.o inline.o readpage.o sysfs.o main.o
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
diff --git a/fs/ext4/main.c b/fs/ext4/main.c
new file mode 100644
index 00000000000000..5dae79ee42b5dd
--- /dev/null
+++ b/fs/ext4/main.c
@@ -0,0 +1,347 @@
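+/*
+ * KLEE harness for ext4, mirroring the btrfs one: the block device and
+ * page cache are stubbed so that __bread_gfp() returns symbolic block
+ * contents to the ext4 mount path.
+ */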
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/raid/pq.h>
+
+#include "../../common.c"
+
+struct backing_dev_info default_backing_dev_info;
+const struct file_operations simple_dir_operations;
+struct raid6_calls raid6_call;
+const struct file_operations pipefifo_fops;
+const struct file_operations def_chr_fops;
+const struct file_operations def_blk_fops;
+const struct file_operations bad_sock_fops;
+
+struct backing_dev_info noop_backing_dev_info = {
+	.name           = "noop",
+	.capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+int sysctl_vfs_cache_pressure = 100;
+unsigned long phys_base;
+
+void (*raid6_2data_recov)(int, size_t, int, int, void **);
+void (*raid6_datap_recov)(int, size_t, int, void **);
+struct super_block *blockdev_superblock __read_mostly;
+
+struct buffer_head *
+__bread_gfp(struct block_device *bdev, sector_t block,
+		                   unsigned size, gfp_t gfp)
+{
+	static struct buffer_head *blocks[100];
+	struct buffer_head *bh;
+
+	if (blocks[block]) {
+		klee_warning("block returned");
+		return blocks[block];
+	}
+	klee_warning("block alloc");
+
+	bh = malloc(sizeof(*bh));
+	if (bh) {
+		char buf[30];
+		bh->b_blocknr = block;
+		bh->b_size = size;
+		bh->b_bdev = bdev;
+		bh->b_data = malloc(size);
+		sprintf(buf, "bdata%lu", block);
+		klee_make_symbolic(bh->b_data, size, buf);
+		blocks[block] = bh;
+	}
+
+	return bh;
+}
+
+void __brelse(struct buffer_head * buf)
+{
+	if (atomic_read(&buf->b_count)) {
+		put_bh(buf);
+		return;
+	}
+}
+
+void get_filesystem(struct file_system_type *fs)
+{
+}
+
+int register_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+/**************************************/
+
+int init_srcu_struct(struct srcu_struct *sp)
+{
+	return 0;
+}
+
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+}
+
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
+{
+	return 0;
+}
+
+void invalidate_bdev(struct block_device *bdev)
+{
+}
+
+void inode_wait_for_writeback(struct inode *inode)
+{
+}
+
+void truncate_inode_pages_final(struct address_space *mapping)
+{
+}
+
+void mark_page_accessed(struct page *page)
+{
+}
+
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
+{
+	return malloc(1);
+}
+
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+					       unsigned int flags,
+					       int max_active,
+					       struct lock_class_key *key,
+					       const char *lock_name, ...)
+{
+	return malloc(1);
+}
+
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+	free(wq);
+}
+
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+	int fgp_flags, gfp_t gfp_mask)
+{
+	__assert_fail("aaa", __FILE__, __LINE__, __PRETTY_FUNCTION__);
+	struct page *pg = malloc(sizeof(*pg));
+	void *pg_data = malloc(4096);
+	memset(pg, 0, sizeof(*pg));
+	pg->virtual = pg_data;
+	return pg;
+}
+
+void unlock_page(struct page *page)
+{
+	clear_bit_unlock(PG_locked, &page->flags);
+}
+
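+/*
+ * Device lookup stub: hands back a single static block_device whose
+ * backing inode contents are made symbolic.
+ */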
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					void *holder)
+{
+	static struct inode bd_inode;
+	static struct request_queue queue;
+	static struct gendisk bd_disk = {
+		.queue = &queue,
+	};
+	static struct block_device bdev = {
+		.bd_inode = &bd_inode,
+		.bd_disk = &bd_disk,
+	};
+	klee_make_symbolic(&bd_inode, sizeof(bd_inode), "bd_inode");
+	bd_inode.i_bdev = &bdev;
+	bd_inode.i_mapping = (struct address_space *)&bd_inode;
+
+	return &bdev;
+}
+
+void blkdev_put(struct block_device *bdev, fmode_t mode)
+{
+}
+
+int bdev_read_only(struct block_device *bdev)
+{
+	return klee_int(__func__);
+}
+
+const char *bdevname(struct block_device *bdev, char *buf)
+{
+	strcpy(buf, "sdb");
+	return buf;
+}
+
+
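+/*
+ * Page-cache read stub: backs the returned page with the (symbolic) buffer
+ * data obtained from __bread_gfp() for the requested index.
+ */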
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	struct page *page = malloc(sizeof(*page));
+	struct inode *inode = (struct inode *)mapping;
+
+	page->virtual = __bread_gfp(inode->i_bdev, index, 4096, 0)->b_data;
+
+	return page;
+}
+#if 0
+void put_page(struct page *page)
+{
+	free(page);
+}
+#endif
+
+int filemap_write_and_wait(struct address_space *mapping)
+{
+	/* TODO */
+	klee_warning("TODO filemap_write_and_wait");
+	return 0;
+}
+
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+				loff_t end)
+{
+	/* TODO */
+//	klee_warning("TODO filemap_fdatawrite_range");
+	return 0;
+}
+
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+			    loff_t end_byte)
+{
+	/* TODO */
+//	klee_warning("TODO filemap_fdatawait_range");
+	return 0;
+}
+
+void wake_up_bit(void *word, int bit)
+{
+}
+
+int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+		const char *name, struct lock_class_key *rwsem_key)
+{
+	return 0;
+}
+
+void put_filesystem(struct file_system_type *fs)
+{
+}
+
+int sb_set_blocksize(struct super_block *sb, int size)
+{
+	if (set_blocksize(sb->s_bdev, size))
+		return 0;
+	/* If we get here, we know size is power of two
+	 * and its value is between 512 and PAGE_SIZE */
+	sb->s_blocksize = size;
+	sb->s_blocksize_bits = blksize_bits(size);
+	return sb->s_blocksize;
+}
+
+void unregister_shrinker(struct shrinker *shrinker)
+{
+}
+
+int sync_blockdev(struct block_device *bdev)
+{
+	return 0;
+}
+
+int sb_min_blocksize(struct super_block *sb, int size)
+{
+	int minsize = bdev_logical_block_size(sb->s_bdev);
+	if (size < minsize)
+		size = minsize;
+	return sb_set_blocksize(sb, size);
+}
+
+int set_blocksize(struct block_device *bdev, int size)
+{
+	/* Size must be a power of two, and between 512 and PAGE_SIZE */
+	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
+		return -EINVAL;
+
+	/* Size cannot be smaller than the size supported by the device */
+//	if (size < bdev_logical_block_size(bdev))
+//		return -EINVAL;
+
+	/* Don't change the size if it is same as current */
+	if (bdev->bd_block_size != size) {
+//		sync_blockdev(bdev);
+		bdev->bd_block_size = size;
+		bdev->bd_inode->i_blkbits = blksize_bits(size);
+//		kill_bdev(bdev);
+	}
+	return 0;
+}
+
+/**************************************/
+
+extern int device_list_add(const char *path,
+		struct btrfs_super_block *disk_super, u64 devid,
+		struct btrfs_fs_devices **fs_devices_ret);
+
+extern struct file_system_type ext4_fs_type;
+
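+/*
+ * Harness entry point: set up a minimal current task and mem_section array,
+ * make the mount flags symbolic, and call ext4's mount callback directly.
+ */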
+int main(void)
+{
+	unsigned int a;
+	int flags;
+#if 0
+	struct btrfs_fs_devices *fs_dev;
+	u64 devid;
+	u64 total_devices;
+	int ret;
+
+	typeof(sb.s_flags) flags;
+
+	klee_make_symbolic(&flags, sizeof(flags), "sb.s_flags");
+	sb.s_flags = flags;
+	INIT_LIST_HEAD(&sb.s_inodes);
+#endif
+//	klee_make_symbolic(data, sizeof(data), "data");
+	klee_make_symbolic(&flags, sizeof(flags), "flags");
+
+	current_task = malloc(sizeof(struct task_struct));
+	memset(current_task, 0, sizeof(struct task_struct));
+	current_task->pid = 1; /* let it be init :) */
+
+	for (a = 0; a < SECTIONS_PER_ROOT; a++) {
+		mem_section[a] = malloc(sizeof(struct mem_section));
+		memset(mem_section[a], 0, sizeof(struct mem_section));
+	}
+#if 0
+	idr_init_cache();
+	radix_tree_init();
+	inode_init();
+
+	btrfs_init_compress();
+	btrfs_init_cachep();
+	extent_io_init();
+	extent_map_init();
+	ordered_data_init();
+	btrfs_delayed_inode_init();
+	btrfs_auto_defrag_init();
+	btrfs_delayed_ref_init();
+	btrfs_prelim_ref_init();
+	btrfs_end_io_wq_init();
+#if 0
+	disk_super = __bread_gfp(&bdev, 16, 4096, 0)->b_data;
+
+	devid = btrfs_stack_device_id(&disk_super->dev_item);
+	total_devices = btrfs_super_num_devices(disk_super);
+
+	ret = device_list_add("/dev/sdb", disk_super, devid, &fs_dev);
+	if (!ret)
+		fs_dev->total_devices = total_devices;
+
+	fs_dev->latest_bdev = &bdev;
+	bfi.fs_devices = fs_dev;
+
+	btrfs_fill_super(&sb, fs_dev, NULL, 0);
+#endif
+#endif
+	ext4_fs_type.mount(&ext4_fs_type, flags, "/dev/sdb", ""); // data
+
+	return 0;
+}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2e03a0a88d92f7..956cac360bb1dd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3350,7 +3350,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	int ret = -ENOMEM;
 	int blocksize, clustersize;
 	unsigned int db_count;
-	unsigned int i;
+	unsigned int i, shift;
 	int needs_recovery, has_huge_files, has_bigalloc;
 	__u64 blocks_count;
 	int err = 0;
@@ -3609,7 +3609,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
 		goto failed_mount;
 
-	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+//	printf("%d BS=%lu\n", BLOCK_SIZE, klee_get_valuel(le32_to_cpu(es->s_log_block_size)));
+	shift = max(10U, le32_to_cpu(es->s_log_block_size));
+//	printf("S=%u\n", klee_get_valuel(shift));
+//	klee_print_expr("SS=", shift);
+	blocksize = BLOCK_SIZE << shift;
+	__assert_fail("xx", __FILE__, __LINE__, __PRETTY_FUNCTION__);
 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
 		ext4_msg(sb, KERN_ERR,
@@ -5620,7 +5625,7 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
 	return 1;
 }
 
-static struct file_system_type ext4_fs_type = {
+struct file_system_type ext4_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext4",
 	.mount		= ext4_mount,
diff --git a/fs/inode.c b/fs/inode.c
index 88110fd0b282e4..f540caea323203 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -191,7 +191,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_fsnotify_mask = 0;
 #endif
 	inode->i_flctx = NULL;
-	this_cpu_inc(nr_inodes);
+//	this_cpu_inc(nr_inodes);
 
 	return 0;
 out:
@@ -423,9 +423,10 @@ void inode_add_lru(struct inode *inode)
 
 static void inode_lru_list_del(struct inode *inode)
 {
-
-	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
-		this_cpu_dec(nr_unused);
+	if (!list_empty(&inode->i_lru))
+		list_del_init(&inode->i_lru);
+/*	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
+		this_cpu_dec(nr_unused);*/
 }
 
 /**
@@ -473,7 +474,7 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 
 	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
-	hlist_add_head(&inode->i_hash, b);
+//	hlist_add_head(&inode->i_hash, b);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&inode_hash_lock);
 }
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 79591c3660cc1d..272cf1524d3d75 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -282,7 +282,7 @@ static inline void get_bh(struct buffer_head *bh)
 
 static inline void put_bh(struct buffer_head *bh)
 {
-        smp_mb__before_atomic();
+//        smp_mb__before_atomic();
         atomic_dec(&bh->b_count);
 }
 
diff --git a/include/linux/console.h b/include/linux/console.h
index 5949d185558990..01783401758f6e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -194,7 +194,7 @@ void vcs_make_sysfs(int index);
 void vcs_remove_sysfs(int index);
 
 /* Some debug stub to catch some of the obvious races in the VT code */
-#if 1
+#if 0
 #define WARN_CONSOLE_UNLOCKED()	WARN_ON(!is_console_locked() && !oops_in_progress)
 #else
 #define WARN_CONSOLE_UNLOCKED()
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994ed66e1c0..018bc01bb029fc 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -61,20 +61,18 @@ do {								\
 #define read_trylock(lock)	__cond_lock(lock, _raw_read_trylock(lock))
 #define write_trylock(lock)	__cond_lock(lock, _raw_write_trylock(lock))
 
-#define write_lock(lock)	_raw_write_lock(lock)
-#define read_lock(lock)		_raw_read_lock(lock)
+#define write_lock(lock)	
+#define read_lock(lock)		
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
 #define read_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		flags = _raw_read_lock_irqsave(lock);	\
 	} while (0)
 #define write_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		flags = _raw_write_lock_irqsave(lock);	\
 	} while (0)
 
 #else
@@ -82,38 +80,34 @@ do {								\
 #define read_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_raw_read_lock_irqsave(lock, flags);	\
 	} while (0)
 #define write_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_raw_write_lock_irqsave(lock, flags);	\
 	} while (0)
 
 #endif
 
-#define read_lock_irq(lock)		_raw_read_lock_irq(lock)
-#define read_lock_bh(lock)		_raw_read_lock_bh(lock)
-#define write_lock_irq(lock)		_raw_write_lock_irq(lock)
-#define write_lock_bh(lock)		_raw_write_lock_bh(lock)
-#define read_unlock(lock)		_raw_read_unlock(lock)
-#define write_unlock(lock)		_raw_write_unlock(lock)
-#define read_unlock_irq(lock)		_raw_read_unlock_irq(lock)
-#define write_unlock_irq(lock)		_raw_write_unlock_irq(lock)
+#define read_lock_irq(lock)		
+#define read_lock_bh(lock)		
+#define write_lock_irq(lock)		
+#define write_lock_bh(lock)		
+#define read_unlock(lock)		
+#define write_unlock(lock)		
+#define read_unlock_irq(lock)		
+#define write_unlock_irq(lock)		
 
 #define read_unlock_irqrestore(lock, flags)			\
 	do {							\
 		typecheck(unsigned long, flags);		\
-		_raw_read_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define read_unlock_bh(lock)		_raw_read_unlock_bh(lock)
+#define read_unlock_bh(lock)		
 
 #define write_unlock_irqrestore(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_raw_write_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define write_unlock_bh(lock)		_raw_write_unlock_bh(lock)
+#define write_unlock_bh(lock)		
 
 #define write_trylock_irqsave(lock, flags) \
 ({ \
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 2cf446704cd4fc..729eda333ec2bc 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -312,7 +312,8 @@ static inline int restart_syscall(void)
 
 static inline int signal_pending(struct task_struct *p)
 {
-	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
+	extern int klee_int(const char *name);
+	return klee_int("signal_pending");
 }
 
 static inline int __fatal_signal_pending(struct task_struct *p)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 59248dcc6ef349..2dc39ec01320ac 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -224,7 +224,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_raw_spin_lock_irqsave(lock, flags);	\
 	} while (0)
 
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
@@ -232,17 +231,16 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 #endif
 
-#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
-#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
-#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
-#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
+#define raw_spin_lock_irq(lock)		do {} while (0)
+#define raw_spin_lock_bh(lock)		do {} while (0)
+#define raw_spin_unlock(lock)		do {} while (0)
+#define raw_spin_unlock_irq(lock)	do {} while (0)
 
 #define raw_spin_unlock_irqrestore(lock, flags)		\
 	do {							\
 		typecheck(unsigned long, flags);		\
-		_raw_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	do {} while (0)
 
 #define raw_spin_trylock_bh(lock) \
 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
@@ -296,12 +294,10 @@ do {							\
 
 static __always_inline void spin_lock(spinlock_t *lock)
 {
-	raw_spin_lock(&lock->rlock);
 }
 
 static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
-	raw_spin_lock_bh(&lock->rlock);
 }
 
 static __always_inline int spin_trylock(spinlock_t *lock)
@@ -321,37 +317,30 @@ do {									\
 
 static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
-	raw_spin_lock_irq(&lock->rlock);
 }
 
 #define spin_lock_irqsave(lock, flags)				\
 do {								\
-	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
 } while (0)
 
 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
 do {									\
-	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
 static __always_inline void spin_unlock(spinlock_t *lock)
 {
-	raw_spin_unlock(&lock->rlock);
 }
 
 static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
-	raw_spin_unlock_bh(&lock->rlock);
 }
 
 static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
-	raw_spin_unlock_irq(&lock->rlock);
 }
 
 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
-	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
 static __always_inline int spin_trylock_bh(spinlock_t *lock)
@@ -406,6 +395,6 @@ static __always_inline int spin_can_lock(spinlock_t *lock)
  */
 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
-		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+		atomic_dec_and_test(atomic)
 
 #endif /* __LINUX_SPINLOCK_H */

