Diffstat (limited to 'lib/sort.c')
 lib/sort.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/lib/sort.c b/lib/sort.c
index 50855ea8c262..cf408aec3733 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -43,8 +43,9 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
/**
* swap_words_32 - swap two elements in 32-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 4)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
*
* Exchange the two objects in memory. This exploits base+index addressing,
* which basically all CPUs have, to minimize loop overhead computations.
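
For reference, the swap_words_32() body this kernel-doc describes looks essentially like the sketch below; it is a minimal standalone version using uint32_t in place of the kernel's u32, and it assumes both pointers satisfy the 4-byte alignment precondition stated above:

#include <stddef.h>
#include <stdint.h>

/* Swap n bytes (a nonzero multiple of 4) in 32-bit chunks. Counting
 * the single index n down to zero is what lets every access use plain
 * base+index addressing, with no separate loop counter to maintain.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		uint32_t t = *(uint32_t *)((char *)a + (n -= 4));
		*(uint32_t *)((char *)a + n) = *(uint32_t *)((char *)b + n);
		*(uint32_t *)((char *)b + n) = t;
	} while (n);
}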
@@ -65,8 +66,9 @@ static void swap_words_32(void *a, void *b, size_t n)
/**
* swap_words_64 - swap two elements in 64-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 8)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
*
* Exchange the two objects in memory. This exploits base+index
* addressing, which basically all CPUs have, to minimize loop overhead
* computations.
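
The 64-bit variant is the same countdown loop with 8-byte chunks, roughly as sketched below (same headers and alignment caveats as the 32-bit sketch; the kernel version additionally falls back to pairs of 32-bit transfers on machines without 64-bit loads and stores, elided here):

static void swap_words_64(void *a, void *b, size_t n)
{
	do {
		uint64_t t = *(uint64_t *)((char *)a + (n -= 8));
		*(uint64_t *)((char *)a + n) = *(uint64_t *)((char *)b + n);
		*(uint64_t *)((char *)b + n) = t;
	} while (n);
}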
@@ -100,8 +102,9 @@ static void swap_words_64(void *a, void *b, size_t n)
/**
* swap_bytes - swap two elements a byte at a time
- * @a, @b: pointers to the elements
- * @size: element size
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
*
* This is the fallback if alignment doesn't allow using larger chunks.
*/
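
The byte-at-a-time fallback keeps the same countdown structure with single-byte accesses, so it works for any element size and alignment; roughly:

/* Fallback: swap n bytes one at a time; no alignment assumptions. */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}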