@@ -264,86 +264,14 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
 #define GET_F32_RS2C(insn, regs)	(get_f32_rs(insn, 2, regs))
 #define GET_F32_RS2S(insn, regs)	(get_f32_rs(RVC_RS2S(insn), 0, regs))
 
-#ifdef CONFIG_RISCV_M_MODE
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
-	u8 val;
-
-	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
-	*r_val = val;
-
-	return 0;
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
-	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
-
-	return 0;
-}
-
-static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
-{
-	register ulong __mepc asm ("a2") = mepc;
-	ulong val, rvc_mask = 3, tmp;
-
-	asm ("and %[tmp], %[addr], 2\n"
-		"bnez %[tmp], 1f\n"
-#if defined(CONFIG_64BIT)
-		__stringify(LWU) " %[insn], (%[addr])\n"
-#else
-		__stringify(LW) " %[insn], (%[addr])\n"
-#endif
-		"and %[tmp], %[insn], %[rvc_mask]\n"
-		"beq %[tmp], %[rvc_mask], 2f\n"
-		"sll %[insn], %[insn], %[xlen_minus_16]\n"
-		"srl %[insn], %[insn], %[xlen_minus_16]\n"
-		"j 2f\n"
-		"1:\n"
-		"lhu %[insn], (%[addr])\n"
-		"and %[tmp], %[insn], %[rvc_mask]\n"
-		"bne %[tmp], %[rvc_mask], 2f\n"
-		"lhu %[tmp], 2(%[addr])\n"
-		"sll %[tmp], %[tmp], 16\n"
-		"add %[insn], %[insn], %[tmp]\n"
-		"2:"
-	: [insn] "=&r" (val), [tmp] "=&r" (tmp)
-	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
-	  [xlen_minus_16] "i" (XLEN_MINUS_16));
-
-	*r_insn = val;
-
-	return 0;
-}
-#else
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
-	if (user_mode(regs)) {
-		return __get_user(*r_val, (u8 __user *)addr);
-	} else {
-		*r_val = *addr;
-		return 0;
-	}
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
-	if (user_mode(regs)) {
-		return __put_user(val, (u8 __user *)addr);
-	} else {
-		*addr = val;
-		return 0;
-	}
-}
-
-#define __read_insn(regs, insn, insn_addr)			\
+#define __read_insn(regs, insn, insn_addr, type)		\
 ({								\
 	int __ret;						\
 								\
 	if (user_mode(regs)) {					\
-		__ret = __get_user(insn, insn_addr);		\
+		__ret = __get_user(insn, (type __user *) insn_addr);	\
 	} else {						\
-		insn = *(__force u16 *)insn_addr;		\
+		insn = *(type *)insn_addr;			\
 		__ret = 0;					\
 	}							\
 								\
@@ -356,9 +284,8 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 
 	if (epc & 0x2) {
 		ulong tmp = 0;
-		u16 __user *insn_addr = (u16 __user *)epc;
 
-		if (__read_insn(regs, insn, insn_addr))
+		if (__read_insn(regs, insn, epc, u16))
 			return -EFAULT;
 		/* __get_user() uses regular "lw" which sign extend the loaded
 		 * value make sure to clear higher order bits in case we "or" it
@@ -369,16 +296,14 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 			*r_insn = insn;
 			return 0;
 		}
-		insn_addr++;
-		if (__read_insn(regs, tmp, insn_addr))
+		epc += sizeof(u16);
+		if (__read_insn(regs, tmp, epc, u16))
 			return -EFAULT;
 		*r_insn = (tmp << 16) | insn;
 
 		return 0;
 	} else {
-		u32 __user *insn_addr = (u32 __user *)epc;
-
-		if (__read_insn(regs, insn, insn_addr))
+		if (__read_insn(regs, insn, epc, u32))
 			return -EFAULT;
 		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
 			*r_insn = insn;
@@ -390,7 +315,6 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 		return 0;
 	}
 }
-#endif
 
 union reg_data {
 	u8 data_bytes[8];
@@ -409,7 +333,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	unsigned long epc = regs->epc;
 	unsigned long insn;
 	unsigned long addr = regs->badaddr;
-	int i, fp = 0, shift = 0, len = 0;
+	int fp = 0, shift = 0, len = 0;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
@@ -492,9 +416,11 @@ int handle_misaligned_load(struct pt_regs *regs)
 		return -EOPNOTSUPP;
 
 	val.data_u64 = 0;
-	for (i = 0; i < len; i++) {
-		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+	if (user_mode(regs)) {
+		if (raw_copy_from_user(&val, (u8 __user *)addr, len))
 			return -1;
+	} else {
+		memcpy(&val, (u8 *)addr, len);
 	}
 
 	if (!fp)
@@ -515,7 +441,7 @@ int handle_misaligned_store(struct pt_regs *regs)
 	unsigned long epc = regs->epc;
 	unsigned long insn;
 	unsigned long addr = regs->badaddr;
-	int i, len = 0, fp = 0;
+	int len = 0, fp = 0;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
@@ -588,9 +514,11 @@ int handle_misaligned_store(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_FPU) && fp)
 		return -EOPNOTSUPP;
 
-	for (i = 0; i < len; i++) {
-		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+	if (user_mode(regs)) {
+		if (raw_copy_to_user((u8 __user *)addr, &val, len))
 			return -1;
+	} else {
+		memcpy((u8 *)addr, &val, len);
 	}
 
 	regs->epc = epc + INSN_LEN(insn);