[PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]


[PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Florian Weimer
From: Evgeny Eremin <[hidden email]>

Use unsigned branch instructions for the comparisons on r2 to fix the
wrong behavior when a negative length is passed to memcpy and memmove:
the size_t argument arrives as a huge unsigned value, which the
previous signed branches treated as negative.  This commit fixes the
generic arm implementation of memcpy and memmove.
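
To make the signed-vs-unsigned distinction concrete, here is a minimal
C sketch (illustration only; it is not part of the patch or of the
glibc test suite).  It models the decision that the old blt (signed
less-than) and the new blo (unsigned lower, i.e. borrow) make after
"subs r2, r2, #4" when a caller passes length -1, which arrives in r2
as 0xffffffff:

  /* Hypothetical sketch, not part of the patch: how blt vs. blo
     classify a "negative" length after "subs r2, r2, #4".  */
  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    uint32_t r2 = (uint32_t) -1;    /* Caller passed length -1.  */
    uint32_t after_subs = r2 - 4;   /* subs r2, r2, #4 -> 0xfffffffb.  */

    /* blt tests the signed view (N != V): 0xfffffffb is negative,
       so the old code dropped into the short-copy tail.  */
    if ((int32_t) after_subs < 0)
      puts ("blt taken: short-copy path (the CVE-2020-6096 bug)");

    /* blo tests for a borrow (C == 0): 0xffffffff >= 4 means no
       borrow, so the fixed code stays on the bulk-copy path, where
       the huge copy faults predictably instead of silently
       corrupting memory.  */
    if (r2 < 4)
      puts ("blo taken: short-copy path");
    else
      puts ("blo not taken: bulk-copy path (fixed behavior)");
    return 0;
  }

A related detail visible in the diff: in the prefetch paths,
"subs r2, r2, #96" becomes "cmp r2, #96", so r2 keeps the remaining
length unbiased by the prefetch distance; that is why the
"PLD( cmn r2, #96 ); PLD( bge 4b )" re-check before looping back can
be dropped.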

---
Please double-check for correct attribution and commit message
contents.  Thanks.  I will submit the NEWS update and XFAIL removal
separately.

 sysdeps/arm/memcpy.S  | 24 ++++++++++--------------
 sysdeps/arm/memmove.S | 24 ++++++++++--------------
 2 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/sysdeps/arm/memcpy.S b/sysdeps/arm/memcpy.S
index 510e8adaf2..bcfbc51d99 100644
--- a/sysdeps/arm/memcpy.S
+++ b/sysdeps/arm/memcpy.S
@@ -68,7 +68,7 @@ ENTRY(memcpy)
  cfi_remember_state
 
  subs r2, r2, #4
- blt 8f
+ blo 8f
  ands ip, r0, #3
  PLD( pld [r1, #0] )
  bne 9f
@@ -82,7 +82,7 @@ ENTRY(memcpy)
  cfi_rel_offset (r6, 4)
  cfi_rel_offset (r7, 8)
  cfi_rel_offset (r8, 12)
- blt 5f
+ blo 5f
 
  CALGN( ands ip, r1, #31 )
  CALGN( rsb r3, ip, #32 )
@@ -98,9 +98,9 @@ ENTRY(memcpy)
 #endif
 
  PLD( pld [r1, #0] )
-2: PLD( subs r2, r2, #96 )
+2: PLD( cmp r2, #96 )
  PLD( pld [r1, #28] )
- PLD( blt 4f )
+ PLD( blo 4f )
  PLD( pld [r1, #60] )
  PLD( pld [r1, #92] )
 
@@ -108,9 +108,7 @@ ENTRY(memcpy)
 4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
  subs r2, r2, #32
  stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
- bge 3b
- PLD( cmn r2, #96 )
- PLD( bge 4b )
+ bhs 3b
 
 5: ands ip, r2, #28
  rsb ip, ip, #32
@@ -222,7 +220,7 @@ ENTRY(memcpy)
  strbge r4, [r0], #1
  subs r2, r2, ip
  strb lr, [r0], #1
- blt 8b
+ blo 8b
  ands ip, r1, #3
  beq 1b
 
@@ -236,7 +234,7 @@ ENTRY(memcpy)
  .macro forward_copy_shift pull push
 
  subs r2, r2, #28
- blt 14f
+ blo 14f
 
  CALGN( ands ip, r1, #31 )
  CALGN( rsb ip, ip, #32 )
@@ -253,9 +251,9 @@ ENTRY(memcpy)
  cfi_rel_offset (r10, 16)
 
  PLD( pld [r1, #0] )
- PLD( subs r2, r2, #96 )
+ PLD( cmp r2, #96 )
  PLD( pld [r1, #28] )
- PLD( blt 13f )
+ PLD( blo 13f )
  PLD( pld [r1, #60] )
  PLD( pld [r1, #92] )
 
@@ -280,9 +278,7 @@ ENTRY(memcpy)
  mov ip, ip, PULL #\pull
  orr ip, ip, lr, PUSH #\push
  stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip}
- bge 12b
- PLD( cmn r2, #96 )
- PLD( bge 13b )
+ bhs 12b
 
  pop {r5 - r8, r10}
  cfi_adjust_cfa_offset (-20)
diff --git a/sysdeps/arm/memmove.S b/sysdeps/arm/memmove.S
index 954037ef3a..0d07b76ee6 100644
--- a/sysdeps/arm/memmove.S
+++ b/sysdeps/arm/memmove.S
@@ -85,7 +85,7 @@ ENTRY(memmove)
  add r1, r1, r2
  add r0, r0, r2
  subs r2, r2, #4
- blt 8f
+ blo 8f
  ands ip, r0, #3
  PLD( pld [r1, #-4] )
  bne 9f
@@ -99,7 +99,7 @@ ENTRY(memmove)
  cfi_rel_offset (r6, 4)
  cfi_rel_offset (r7, 8)
  cfi_rel_offset (r8, 12)
- blt 5f
+ blo 5f
 
  CALGN( ands ip, r1, #31 )
  CALGN( sbcsne r4, ip, r2 )  @ C is always set here
@@ -114,9 +114,9 @@ ENTRY(memmove)
 #endif
 
  PLD( pld [r1, #-4] )
-2: PLD( subs r2, r2, #96 )
+2: PLD( cmp r2, #96 )
  PLD( pld [r1, #-32] )
- PLD( blt 4f )
+ PLD( blo 4f )
  PLD( pld [r1, #-64] )
  PLD( pld [r1, #-96] )
 
@@ -124,9 +124,7 @@ ENTRY(memmove)
 4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
  subs r2, r2, #32
  stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
- bge 3b
- PLD( cmn r2, #96 )
- PLD( bge 4b )
+ bhs 3b
 
 5: ands ip, r2, #28
  rsb ip, ip, #32
@@ -237,7 +235,7 @@ ENTRY(memmove)
  strbge r4, [r0, #-1]!
  subs r2, r2, ip
  strb lr, [r0, #-1]!
- blt 8b
+ blo 8b
  ands ip, r1, #3
  beq 1b
 
@@ -251,7 +249,7 @@ ENTRY(memmove)
  .macro backward_copy_shift push pull
 
  subs r2, r2, #28
- blt 14f
+ blo 14f
 
  CALGN( ands ip, r1, #31 )
  CALGN( rsb ip, ip, #32 )
@@ -268,9 +266,9 @@ ENTRY(memmove)
  cfi_rel_offset (r10, 16)
 
  PLD( pld [r1, #-4] )
- PLD( subs r2, r2, #96 )
+ PLD( cmp r2, #96 )
  PLD( pld [r1, #-32] )
- PLD( blt 13f )
+ PLD( blo 13f )
  PLD( pld [r1, #-64] )
  PLD( pld [r1, #-96] )
 
@@ -295,9 +293,7 @@ ENTRY(memmove)
  mov     r4, r4, PUSH #\push
  orr     r4, r4, r3, PULL #\pull
  stmdb   r0!, {r4 - r8, r10, ip, lr}
- bge 12b
- PLD( cmn r2, #96 )
- PLD( bge 13b )
+ bhs 12b
 
  pop {r5 - r8, r10}
  cfi_adjust_cfa_offset (-20)
--
2.20.1



[PATCH 2/2] arm: CVE-2020-6096: Fix multiarch memcpy for negative length [BZ #25620]

Florian Weimer
From: Alexander Anisimov <[hidden email]>

Use unsigned branch instructions for the comparisons on r2 to fix the
wrong behavior when a negative length is passed to memcpy: the size_t
argument arrives as a huge unsigned value, which the previous signed
branches treated as negative.  This commit fixes the armv7 multiarch
version.
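
The armv7 code additionally uses the sign-flag branches bmi/bpl after
subs, which test the N flag and are equally wrong for huge unsigned
counts; the patch replaces them with blo/bhs.  Here is a minimal
hypothetical C sketch (illustration only, not part of the patch) of
the bmi versus blo decision after "subs count, count, #64":

  /* Hypothetical sketch, not part of the patch: after
     "subs count, count, #64", bmi tests the sign of the result
     (N flag), while blo tests for an unsigned borrow (C clear).  */
  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    uint32_t count = (uint32_t) -8;  /* "Negative" length in r2.  */

    int n_set = (int32_t) (count - 64) < 0;  /* bmi: result negative.  */
    int borrow = count < 64;                 /* blo: subtraction borrowed.  */

    /* bmi sees 0xffffffb8 as negative and would bail out to the
       unaligned tail; blo keeps the huge count on the main loop.  */
    printf ("bmi would branch: %s\n", n_set ? "yes (bug)" : "no");
    printf ("blo would branch: %s\n", borrow ? "yes" : "no (fixed)");
    return 0;
  }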

---
Please double-check for correct attribution and commit message
contents.  Thanks.  I will submit the NEWS update and XFAIL removal
separately.

 sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
index bf4ac7077f..379bb56fc9 100644
--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
@@ -268,7 +268,7 @@ ENTRY(memcpy)
 
  mov dst, dstin /* Preserve dstin, we need to return it.  */
  cmp count, #64
- bge .Lcpy_not_short
+ bhs .Lcpy_not_short
  /* Deal with small copies quickly by dropping straight into the
    exit block.  */
 
@@ -351,10 +351,10 @@ ENTRY(memcpy)
 
 1:
  subs tmp2, count, #64 /* Use tmp2 for count.  */
- blt .Ltail63aligned
+ blo .Ltail63aligned
 
  cmp tmp2, #512
- bge .Lcpy_body_long
+ bhs .Lcpy_body_long
 
 .Lcpy_body_medium: /* Count in tmp2.  */
 #ifdef USE_VFP
@@ -378,7 +378,7 @@ ENTRY(memcpy)
  add src, src, #64
  vstr d1, [dst, #56]
  add dst, dst, #64
- bge 1b
+ bhs 1b
  tst tmp2, #0x3f
  beq .Ldone
 
@@ -412,7 +412,7 @@ ENTRY(memcpy)
  ldrd A_l, A_h, [src, #64]!
  strd A_l, A_h, [dst, #64]!
  subs tmp2, tmp2, #64
- bge 1b
+ bhs 1b
  tst tmp2, #0x3f
  bne 1f
  ldr tmp2,[sp], #FRAME_SIZE
@@ -482,7 +482,7 @@ ENTRY(memcpy)
  add src, src, #32
 
  subs tmp2, tmp2, #prefetch_lines * 64 * 2
- blt 2f
+ blo 2f
 1:
  cpy_line_vfp d3, 0
  cpy_line_vfp d4, 64
@@ -494,7 +494,7 @@ ENTRY(memcpy)
  add dst, dst, #2 * 64
  add src, src, #2 * 64
  subs tmp2, tmp2, #prefetch_lines * 64
- bge 1b
+ bhs 1b
 
 2:
  cpy_tail_vfp d3, 0
@@ -615,8 +615,8 @@ ENTRY(memcpy)
 1:
  pld [src, #(3 * 64)]
  subs count, count, #64
- ldrmi tmp2, [sp], #FRAME_SIZE
- bmi .Ltail63unaligned
+ ldrlo tmp2, [sp], #FRAME_SIZE
+ blo .Ltail63unaligned
  pld [src, #(4 * 64)]
 
 #ifdef USE_NEON
@@ -633,7 +633,7 @@ ENTRY(memcpy)
  neon_load_multi d0-d3, src
  neon_load_multi d4-d7, src
  subs count, count, #64
- bmi 2f
+ blo 2f
 1:
  pld [src, #(4 * 64)]
  neon_store_multi d0-d3, dst
@@ -641,7 +641,7 @@ ENTRY(memcpy)
  neon_store_multi d4-d7, dst
  neon_load_multi d4-d7, src
  subs count, count, #64
- bpl 1b
+ bhs 1b
 2:
  neon_store_multi d0-d3, dst
  neon_store_multi d4-d7, dst
--
2.20.1


Re: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Evgeny Eremin
In reply to this post by Florian Weimer
On Tue, Jul 07, 2020 at 08:08:22PM +0200, Florian Weimer wrote:

> From: Evgeny Eremin <[hidden email]>
>
> Use unsigned branch instructions for the comparisons on r2 to fix the
> wrong behavior when a negative length is passed to memcpy and memmove:
> the size_t argument arrives as a huge unsigned value, which the
> previous signed branches treated as negative.  This commit fixes the
> generic arm implementation of memcpy and memmove.
>
> ---
> Please double-check for correct attribution and commit message
> contents.  Thanks.  I will submit the NEWS update and XFAIL removal
> separately.
>
> ...

LGTM, thanks.

--
Evgeny

Re: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Sourceware - libc-alpha mailing list
In reply to this post by Florian Weimer
On 7/7/20 2:08 PM, Florian Weimer wrote:
> From: Evgeny Eremin <[hidden email]>
>
> Use unsigned branch instructions for the comparisons on r2 to fix the
> wrong behavior when a negative length is passed to memcpy and memmove:
> the size_t argument arrives as a huge unsigned value, which the
> previous signed branches treated as negative.  This commit fixes the
> generic arm implementation of memcpy and memmove.

As a GNU Maintainer for glibc I have some visibility into the tickets
filed for FSF copyright assignment to glibc.

For the record both Open Mobile Platform LLC and Evgeny Eremin
have FSF copyright assignment on file for glibc. We can accept any
and all patches. Thank you all for your patience with the CLA process.

I can also confirm (and sorry for not responding more quickly on the
other thread) that all discussions between Open Mobile Platform LLC
and the FSF Copyright & Licensing Associate were conducted through
*@omprussia.ru accounts as part of ticket [gnu.org #1543679].

In this case, *@omprussia.ru addresses are by default considered to
belong to employees of Open Mobile Platform LLC for the purposes of
the copyright assignment, but we all know that might be inaccurate,
and we trust everyone to maintain correct protocols for copyright
assignment.

--
Cheers,
Carlos.


Re: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Florian Weimer
In reply to this post by Evgeny Eremin
* Evgeny Eremin:

> On Tue, Jul 07, 2020 at 08:08:22PM +0200, Florian Weimer wrote:
>> From: Evgeny Eremin <[hidden email]>
>>
>> Use unsigned branch instructions for the comparisons on r2 to fix the
>> wrong behavior when a negative length is passed to memcpy and memmove:
>> the size_t argument arrives as a huge unsigned value, which the
>> previous signed branches treated as negative.  This commit fixes the
>> generic arm implementation of memcpy and memmove.
>>
>> ---
>> Please double-check for correct attribution and commit message
>> contents.  Thanks.  I will submit the NEWS update and XFAIL removal
>> separately.
>>
>> ...
>
> LGTM, thanks.

Is the other patch okay as well?  Thanks.

RE: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Alexander Anisimov
Yes, it is.  The patch for multiarch and its commit message LGTM too.
Thanks.
________________________________________
From: Florian Weimer [[hidden email]]
Sent: 08 July 2020 08:50
To: [hidden email]
Cc: Konstantin Karasev; Anton Rybakov; Ildar Kamaletdinov; Alexander Anisimov
Subject: Re: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

* Evgeny Eremin:

> On Tue, Jul 07, 2020 at 08:08:22PM +0200, Florian Weimer wrote:
>> From: Evgeny Eremin <[hidden email]>
>>
>> Use unsigned branch instructions for the comparisons on r2 to fix the
>> wrong behavior when a negative length is passed to memcpy and memmove:
>> the size_t argument arrives as a huge unsigned value, which the
>> previous signed branches treated as negative.  This commit fixes the
>> generic arm implementation of memcpy and memmove.
>>
>> ---
>> Please double-check for correct attribution and commit message
>> contents.  Thanks.  I will submit the NEWS update and XFAIL removal
>> separately.
>>
>> ...
>
> LGTM, thanks.

Is the other patch okay as well?  Thanks.

RE: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative length [BZ #25620]

Alexander Anisimov
In reply to this post by Florian Weimer
>> On Tue, Jul 07, 2020 at 08:08:22PM +0200, Florian Weimer wrote:
>>> From: Evgeny Eremin <[hidden email]>
>>>
>>> Use unsigned branch instructions for the comparisons on r2 to fix the
>>> wrong behavior when a negative length is passed to memcpy and memmove:
>>> the size_t argument arrives as a huge unsigned value, which the
>>> previous signed branches treated as negative.  This commit fixes the
>>> generic arm implementation of memcpy and memmove.
>>>
>>> ---
>>> Please double-check for correct attribution and commit message
>>> contents.  Thanks.  I will submit the NEWS update and XFAIL removal
>>> separately.
>>>
>>> ...
>>
>> LGTM, thanks.

> Is the other patch okay as well?  Thanks.

Yes, it is.  The patch for multiarch and its commit message LGTM too.
Thanks.

--
Alexander