Diffstat (limited to 'lib')
-rw-r--r--  lib/hexdump.c   2
-rw-r--r--  lib/kobject.c   7
-rw-r--r--  lib/lockref.c  23
3 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
/**
* hex_to_bin - convert a hex digit to its real value
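The hexdump.c hunk above exports an uppercase counterpart to hex_asc. As a rough sketch (not part of this patch; the helper name below is hypothetical), a caller could format one byte as two uppercase hex characters by indexing the new table with each nibble:

/* Illustration only: pack one byte as two uppercase hex characters. */
static inline char *sketch_hex_byte_pack_upper(char *buf, u8 byte)
{
	*buf++ = hex_asc_upper[(byte >> 4) & 0x0f];	/* high nibble */
	*buf++ = hex_asc_upper[byte & 0x0f];		/* low nibble  */
	return buf;
}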
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..084f7b18d0c0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+ pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
schedule_delayed_work(&kobj->release, HZ);
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
- bool may_mount = false;
-
- if (type == KOBJ_NS_TYPE_NONE)
- return true;
+ bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
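In the kobject.c hunk above, kobj_ns_current_may_mount() no longer special-cases KOBJ_NS_TYPE_NONE up front: initializing may_mount to true covers that case, because the lookup below only runs for types strictly greater than KOBJ_NS_TYPE_NONE. Assuming the rest of the function matches the surrounding file (kobj_ns_ops_tbl and its current_may_mount() hook are not shown in the context above), the result reads roughly as follows:

bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
	bool may_mount = true;	/* KOBJ_NS_TYPE_NONE falls through as "allowed" */

	spin_lock(&kobj_ns_type_lock);
	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
	    kobj_ns_ops_tbl[type])
		may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
	spin_unlock(&kobj_ns_type_lock);

	return may_mount;
}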
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
#ifdef CONFIG_CMPXCHG_LOCKREF
/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
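For context on the two fallbacks added above: an architecture only needs to define these macros before lib/lockref.c is built to opt out of the defaults. A purely illustrative sketch of such overrides (the header placement and the 64-bit helper name are assumptions, not part of this patch):

/* Illustration only: a weakly-ordered arch supplying a barrier-less cmpxchg64. */
#define cmpxchg64_relaxed(ptr, old, new) \
	arch_cmpxchg64_no_barrier((ptr), (old), (new))	/* hypothetical arch helper */

/* Illustration only: an arch with an expensive cpu_relax() using a compiler barrier. */
#define arch_mutex_cpu_relax()	barrier()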
@@ -14,12 +30,13 @@
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
- old.lock_count = cmpxchg64(&lockref->lock_count, \
- old.lock_count, new.lock_count); \
+ old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
+ old.lock_count, \
+ new.lock_count); \
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- cpu_relax(); \
+ arch_mutex_cpu_relax(); \
} \
} while (0)
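The CMPXCHG_LOOP() macro changed above is invoked by the lockref operations later in lib/lockref.c; those call sites are untouched by this patch. As a reminder of how the CODE and SUCCESS arguments plug in, lockref_get() uses it roughly like this (a sketch based on the surrounding file, not part of this diff):

void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless update of the count via the cmpxchg loop. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: fall back to taking the spinlock. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}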