Commit 3cf731d6 (packages/kernel/linux)
Authored Jul 09, 2018 by Lorenzo "Palinuro" Faletra

    Import Upstream version 4.16.16

Parent: f34edc87
428 changed files
drivers/ata/libata-core.c
...
...
@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
 
+	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
+	   SD7SN6S256G and SD8SN8U256G */
+	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
+
 	/* devices which puke on READ_NATIVE_MAX */
 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
...
...
@@ -4553,6 +4557,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SanDisk SD7UB3Q*G1001",	NULL,		ATA_HORKAGE_NOLPM, },
 
 	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
...
...
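For readers following these hunks: each ata_device_blacklist entry pairs a model-number glob with an optional firmware-revision glob and a set of ATA_HORKAGE_* flags that libata applies to any device whose IDENTIFY strings match. A minimal sketch of the entry shape (field names mirror the struct in libata-core.c; the sample device strings are made up for illustration):

	/* Sketch of the table entry shape used in the hunks above. */
	struct ata_blacklist_entry {
		const char	*model_num;	/* glob matched against the IDENTIFY model string */
		const char	*model_rev;	/* firmware revision glob; NULL matches any revision */
		unsigned long	horkage;	/* ATA_HORKAGE_* quirk flags applied on a match */
	};

	/* Illustrative entry only (not from the patch): disable NCQ for a made-up drive. */
	static const struct ata_blacklist_entry example_quirk = {
		.model_num	= "ExampleDisk SSD*",
		.model_rev	= NULL,
		.horkage	= ATA_HORKAGE_NONCQ,
	};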
drivers/base/firmware_class.c
...
...
@@ -524,7 +524,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
 	fwn = fw_find_devm_name(dev, name);
 	if (fwn)
-		return 1;
+		return 0;
 
 	fwn = devres_alloc(fw_name_devm_release,
			   sizeof(struct fw_name_devm), GFP_KERNEL);
...
...
@@ -552,6 +552,7 @@ static int assign_fw(struct firmware *fw, struct device *device,
 		     unsigned int opt_flags)
 {
 	struct fw_priv *fw_priv = fw->priv;
+	int ret;
 
 	mutex_lock(&fw_lock);
 	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
...
...
@@ -568,8 +569,13 @@ static int assign_fw(struct firmware *fw, struct device *device,
 	 */
 	/* don't cache firmware handled without uevent */
 	if (device && (opt_flags & FW_OPT_UEVENT) &&
-	    !(opt_flags & FW_OPT_NOCACHE))
-		fw_add_devm_name(device, fw_priv->fw_name);
+	    !(opt_flags & FW_OPT_NOCACHE)) {
+		ret = fw_add_devm_name(device, fw_priv->fw_name);
+		if (ret) {
+			mutex_unlock(&fw_lock);
+			return ret;
+		}
+	}
 
 	/*
 	 * After caching firmware image is started, let it piggyback
...
...
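Reading the two firmware_class hunks together: fw_add_devm_name() now reports "name already tracked" as success (0) rather than 1, reserving a non-zero return strictly for allocation failure, which lets assign_fw() propagate that failure instead of silently skipping the devm bookkeeping. A hedged sketch of the resulting return-value contract (helper name reused from the diff, body heavily simplified):

	/* Simplified sketch, not the full driver-core implementation: return 0
	 * when the name is (or becomes) tracked for this device, -ENOMEM on
	 * allocation failure. */
	static int fw_add_devm_name(struct device *dev, const char *name)
	{
		struct fw_name_devm *fwn;

		fwn = fw_find_devm_name(dev, name);
		if (fwn)
			return 0;	/* already tracked: nothing to do */

		fwn = devres_alloc(fw_name_devm_release,
				   sizeof(struct fw_name_devm), GFP_KERNEL);
		if (!fwn)
			return -ENOMEM;	/* assign_fw() now propagates this */
		/* ... duplicate the name and devres_add() it (elided) ... */
		return 0;
	}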
drivers/base/power/main.c
...
...
@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	dev->power.wakeup_path = false;
 
-	if (dev->power.no_pm_callbacks) {
-		ret = 1;	/* Let device go direct_complete */
+	if (dev->power.no_pm_callbacks)
 		goto unlock;
-	}
 
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.prepare;
...
...
@@ -1960,7 +1958,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	 */
 	spin_lock_irq(&dev->power.lock);
 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
-		pm_runtime_suspended(dev) && ret > 0 &&
+		((pm_runtime_suspended(dev) && ret > 0) ||
+		 dev->power.no_pm_callbacks) &&
 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
 	spin_unlock_irq(&dev->power.lock);
 	return 0;
...
...
drivers/base/regmap/regmap.c
...
...
@@ -99,7 +99,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
 	int ret;
 	unsigned int val;
 
-	if (map->cache == REGCACHE_NONE)
+	if (map->cache_type == REGCACHE_NONE)
 		return false;
 
 	if (!map->cache_ops)
...
...
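The regmap fix above is a wrong-member bug: struct regmap carries both a cache data pointer and a cache type enum, and regmap_cached() was comparing the pointer against REGCACHE_NONE. A rough sketch of the two members involved (names as in drivers/base/regmap/internal.h, everything else elided):

	/* Rough sketch of the relevant regmap members; the real struct has many more. */
	struct regmap {
		/* ... */
		enum regcache_type cache_type;	/* REGCACHE_NONE, REGCACHE_RBTREE, ... */
		void *cache;			/* backing storage used by the cache_ops */
		/* ... */
	};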
drivers/bcma/driver_mips.c
...
...
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
 {
 	int i;
 	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
-	char interrupts[20];
+	char interrupts[25];
 	char *ints = interrupts;
 
 	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
...
...
drivers/block/null_blk.c
...
...
@@ -72,6 +72,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE	= 3,
 };
 
+#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
  *
...
...
@@ -86,10 +87,10 @@ enum nullb_device_flags {
  */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
 
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 
 struct nullb_device {
 	struct nullb *nullb;
...
...
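The key change in these null_blk hunks is that the per-page sector bitmap can no longer be a single unsigned long, because a page may hold more sectors than one word has bits (plus the two LOCK/FREE flag bits). DECLARE_BITMAP sizes an array from the bit count; its definition in include/linux/types.h is essentially:

	/* DECLARE_BITMAP(name, bits) declares an array of unsigned long large
	 * enough for 'bits' bits, so with MAP_SZ = (PAGE_SIZE >> SECTOR_SHIFT) + 2
	 * the map covers every sector in the page plus the LOCK and FREE bits. */
	#define DECLARE_BITMAP(name, bits) \
		unsigned long name[BITS_TO_LONGS(bits)]

Since the member is now an array, callers pass it directly to the bitop helpers, which is why the hunks below drop the leading '&' in the test_bit()/__set_bit()/__clear_bit() calls.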
@@ -728,7 +729,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
 
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
...
...
@@ -738,13 +739,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
...
...
@@ -759,9 +767,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
+		__clear_bit(sector_bit, t_page->bitmap);
 
-		if (!t_page->bitmap) {
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
...
...
@@ -832,7 +840,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
 
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 
 	return NULL;
...
...
@@ -895,10 +903,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
 
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
...
...
@@ -914,11 +922,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
...
...
@@ -955,10 +963,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
 		 * We found the page which is being flushed to disk by other
 		 * threads
 		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
 			c_pages[i] = NULL;
 		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 
 	one_round = 0;
...
...
@@ -1011,7 +1019,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
 
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 
 		if (is_fua)
 			null_free_sector(nullb, sector, true);
...
...
@@ -1802,10 +1810,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
 
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
-
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
...
...
drivers/block/paride/pcd.c
...
...
@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	int ret;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&pcd_mutex);
 	ret = cdrom_open(&cd->info, bdev, mode);
 	mutex_unlock(&pcd_mutex);
...
...
drivers/cdrom/cdrom.c
...
...
@@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
 	cd_dbg(CD_OPEN, "entering cdrom_open\n");
 
-	/* open is event synchronization point, check events first */
-	check_disk_change(bdev);
-
 	/* if this was a O_NONBLOCK open and we should honor the flags,
 	 * do a quick open without drive/disc integrity checks. */
 	cdi->use_count++;
...
...
drivers/cdrom/gdrom.c
...
...
@@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = {
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 {
 	int ret;
+
+	check_disk_change(bdev);
+
 	mutex_lock(&gdrom_mutex);
 	ret = cdrom_open(gd.cd_info, bdev, mode);
 	mutex_unlock(&gdrom_mutex);
...
...
drivers/char/hw_random/bcm2835-rng.c
...
...
@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 	/* Clock is optional on most platforms */
 	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
 
 	priv->rng.name = pdev->name;
 	priv->rng.init = bcm2835_rng_init;
...
...
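The bcm2835-rng hunk is the standard probe-deferral dance: devm_clk_get() encodes errors as ERR_PTR values, and -EPROBE_DEFER means the clock provider has not probed yet, so the driver must bail out and let the driver core retry probe later rather than treat the optional clock as permanently absent. A hedged sketch of the general pattern (not the exact driver code):

	/* Generic illustration of handling an optional clock with probe deferral. */
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready: retry probe later */
		clk = NULL;			/* genuinely absent: the clock is optional */
	}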
drivers/char/hw_random/stm32-rng.c
...
...
@@ -21,6 +21,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 #define RNG_CR 0x00
...
...
@@ -46,6 +47,7 @@ struct stm32_rng_private {
 	struct hwrng rng;
 	void __iomem *base;
 	struct clk *clk;
+	struct reset_control *rst;
 };
 
 static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
...
...
@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
 	if (IS_ERR(priv->clk))
 		return PTR_ERR(priv->clk);
 
+	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+	if (!IS_ERR(priv->rst)) {
+		reset_control_assert(priv->rst);
+		udelay(2);
+		reset_control_deassert(priv->rst);
+	}
+
 	dev_set_drvdata(dev, priv);
 
 	priv->rng.name = dev_driver_string(dev),
...
...
drivers/char/ipmi/ipmi_ssif.c
...
...
@@ -761,7 +761,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			ssif_info->ssif_state = SSIF_NORMAL;
 			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Error getting flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 			/*
...
...
@@ -783,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		if ((result < 0) || (len < 3) || (data[2] != 0)) {
 			/* Error clearing flags */
 			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
 			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
...
...
drivers/cpufreq/cppc_cpufreq.c
...
...
@@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 				NSEC_PER_USEC;
 	policy->shared_type = cpu->shared_type;
 
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+		int i;
+
 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
-	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+
+		for_each_cpu(i, policy->cpus) {
+			if (unlikely(i == policy->cpu))
+				continue;
+
+			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
+			       sizeof(cpu->perf_caps));
+		}
+	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
 		/* Support only SW_ANY for now. */
 		pr_debug("Unsupported CPU co-ord type\n");
 		return -EFAULT;
...
...
@@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void)
 	return ret;
 
 out:
-	for_each_possible_cpu(i)
-		kfree(all_cpu_data[i]);
+	for_each_possible_cpu(i) {
+		cpu = all_cpu_data[i];
+		if (!cpu)
+			break;
+		free_cpumask_var(cpu->shared_cpu_map);
+		kfree(cpu);
+	}
 
 	kfree(all_cpu_data);
 	return -ENODEV;
...
...
drivers/cpufreq/cpufreq.c
...
...
@@ -1327,14 +1327,14 @@ static int cpufreq_online(unsigned int cpu)
 	return 0;
 
 out_exit_policy:
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-	for_each_cpu(j, policy->real_cpus)
-		remove_cpu_dev_symlink(policy, get_cpu_device(j));
-
 out_free_policy:
 	cpufreq_policy_free(policy);
 	return ret;
...
...
drivers/crypto/caam/caamalg.c
...
...
@@ -760,15 +760,18 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     and IV
  */
 struct ablkcipher_edesc {
 	int src_nents;
 	int dst_nents;
 	dma_addr_t iv_dma;
+	enum dma_data_direction iv_dir;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
...
...
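The caamalg hunks below all hang off one idea: the extended descriptor now records which DMA direction the IV was mapped with, because dma_unmap_single() must be called with the same direction as the matching dma_map_single(); callers that never map an IV pass DMA_NONE. A hedged sketch of the map/unmap pairing this enables (variable names follow the diff, error handling trimmed):

	/* Illustration only: map the IV with the direction recorded in the edesc
	 * (DMA_TO_DEVICE for a caller-supplied IV, DMA_FROM_DEVICE when the
	 * accelerator generates it), and unmap with that same direction later. */
	iv_dma = dma_map_single(jrdev, iv, ivsize, edesc->iv_dir);
	if (dma_mapping_error(jrdev, iv_dma))
		return -ENOMEM;

	/* ... job descriptor built and executed ... */

	dma_unmap_single(jrdev, iv_dma, ivsize, edesc->iv_dir);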
@@ -778,7 +781,8 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents,
-		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+		       dma_addr_t iv_dma, int ivsize,
+		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
 	if (dst != src) {
...
...
@@ -790,7 +794,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
...
...
@@ -801,7 +805,7 @@ static void aead_unmap(struct device *dev,
 		       struct aead_request *req)
 {
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents, 0, 0,
+		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
...
...
@@ -814,7 +818,7 @@ static void ablkcipher_unmap(struct device *dev,
 	caam_unmap(dev, req->src, req->dst,
 		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize,
+		   edesc->iv_dma, ivsize, edesc->iv_dir,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
...
...
@@ -903,6 +907,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
 				 ivsize, 0);
 
+	/* In case initial IV was generated, copy it in GIVCIPHER request */
+	if (edesc->iv_dir == DMA_FROM_DEVICE) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+		     edesc->sec4_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
+
 	kfree(edesc);
 
 	ablkcipher_request_complete(req, err);
...
...
@@ -913,10 +929,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
 	struct ablkcipher_request *req = context;
 	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
 	dev_err(jrdev, "%s %d: err 0x%x\n",
 		__func__, __LINE__, err);
 #endif
...
...
@@ -934,14 +950,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 
 	ablkcipher_unmap(jrdev, edesc, req);
-
-	/*
-	 * The crypto API expects us to set the IV (req->info) to the last
-	 * ciphertext block.
-	 */
-	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
-				 ivsize, 0);
-
 	kfree(edesc);
 
 	ablkcipher_request_complete(req, err);
...
...
@@ -1090,15 +1098,14 @@ static void init_authenc_job(struct aead_request *req,
  */
 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 				struct ablkcipher_edesc *edesc,
-				struct ablkcipher_request *req,
-				bool iv_contig)
+				struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
-	int len, sec4_sg_index = 0;
+	u32 out_options = 0;
+	dma_addr_t dst_dma;
+	int len;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
...
...
@@ -1114,30 +1121,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	if (iv_contig) {
-		src_dma = edesc->iv_dma;
-		in_options = 0;
-	} else {
-		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents + 1;
-		in_options = LDST_SGF;
-	}
-	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+			  LDST_SGF);
 
 	if (likely(req->src == req->dst)) {
-		if (edesc->src_nents == 1 && iv_contig) {
-			dst_dma = sg_dma_address(req->src);
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
+		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
 	} else {
 		if (edesc->dst_nents == 1) {
 			dst_dma = sg_dma_address(req->dst);
 		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				sec4_sg_index * sizeof(struct sec4_sg_entry);
+			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+				  sizeof(struct sec4_sg_entry);
 			out_options = LDST_SGF;
 		}
 	}
...
...
@@ -1149,13 +1144,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  */
 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
 				    struct ablkcipher_edesc *edesc,
-				    struct ablkcipher_request *req,
-				    bool iv_contig)
+				    struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	u32 *desc = edesc->hw_desc;
-	u32 out_options, in_options;
+	u32 in_options;
 	dma_addr_t dst_dma, src_dma;
 	int len, sec4_sg_index = 0;
...
...
@@ -1181,15 +1175,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	}
 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
 
-	if (iv_contig) {
-		dst_dma = edesc->iv_dma;
-		out_options = 0;
-	} else {
-		dst_dma = edesc->sec4_sg_dma +
-			  sec4_sg_index * sizeof(struct sec4_sg_entry);
-		out_options = LDST_SGF;
-	}
-	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+		  sizeof(struct sec4_sg_entry);
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
 }
 
 /*
...
...
@@ -1278,7 +1266,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			GFP_DMA | flags);
 	if (!edesc) {
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
...
...
@@ -1482,8 +1470,7 @@ static int aead_decrypt(struct aead_request *req)
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-						       *req, int desc_bytes,
-						       bool *iv_contig_out)
+						       *req, int desc_bytes)
 {
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
...
...
@@ -1492,8 +1479,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	bool in_contig;
+	dma_addr_t iv_dma;
+	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
...
...
@@ -1537,33 +1524,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		}
 	}
 
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (mapped_src_nents == 1 &&
-	    iv_dma + ivsize == sg_dma_address(req->src)) {
-		in_contig = true;
-		sec4_sg_ents = 0;
-	} else {
-		in_contig = false;
-		sec4_sg_ents = 1 + mapped_src_nents