Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-02-01 07:42:18 +00:00)

Commit 0fab2cb14e (parent 67381595a0)

    There are two sides of a story, but Mike is a douche in both of them.
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (!blk)
 		return -ENOMEM;
 
+	rbnode->block = blk;
+
 	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
 		present = krealloc(rbnode->cache_present,
 				   BITS_TO_LONGS(blklen) * sizeof(*present),
 				   GFP_KERNEL);
-		if (!present) {
-			kfree(blk);
+		if (!present)
 			return -ENOMEM;
-		}
 
 		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
 		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	}
 
 	/* update the rbnode block, its size and the base register */
-	rbnode->block = blk;
 	rbnode->blklen = blklen;
 	rbnode->base_reg = base_reg;
 	rbnode->cache_present = present;
@@ -92,8 +92,8 @@ static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size)
 	}
 
 	/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
-	/* Needed a raise so fuck it and fuck Fred the fucking T3Q bitch */
-	offset = 8172;
+
+	offset = 4096;
 	if (bcm47xx_nvram_is_valid(flash_start + offset))
 		goto found;
 
@@ -706,7 +706,8 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
 	/* Construct the family header first */
 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-	       LS_DEVICE_NAME_MAX);
+	strscpy_pad(header->device_name,
+		    dev_name(&query->port->agent->device->dev),
+		    LS_DEVICE_NAME_MAX);
 	header->port_num = query->port->port_num;
 
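Note: the hunk above swaps an open-coded memcpy() of a NUL-terminated string for strscpy_pad(). A minimal sketch of the difference, with an illustrative buffer size standing in for LS_DEVICE_NAME_MAX:

    #include <linux/string.h>

    struct ls_header {
    	char device_name[64];	/* stands in for LS_DEVICE_NAME_MAX */
    };

    static void fill_device_name(struct ls_header *hdr, const char *src)
    {
    	/*
    	 * memcpy(hdr->device_name, src, 64) reads 64 bytes from 'src' even
    	 * when the string is shorter, so it can run off the end of the
    	 * source allocation. strscpy_pad() stops at the source NUL, always
    	 * terminates the destination, and zero-fills the tail, so no stale
    	 * bytes leak into the netlink message.
    	 */
    	strscpy_pad(hdr->device_name, src, sizeof(hdr->device_name));
    }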
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
 	u64 reg;
 	struct pio_buf *pbuf;
+	LIST_HEAD(wake_list);
 
 	if (!sc)
 		return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
 	spin_unlock(&sc->release_lock);
 
 	write_seqlock(&sc->waitlock);
-	while (!list_empty(&sc->piowait)) {
+	if (!list_empty(&sc->piowait))
+		list_move(&sc->piowait, &wake_list);
+	write_sequnlock(&sc->waitlock);
+	while (!list_empty(&wake_list)) {
 		struct iowait *wait;
 		struct rvt_qp *qp;
 		struct hfi1_qp_priv *priv;
 
-		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		wait = list_first_entry(&wake_list, struct iowait, list);
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
 		priv->s_iowait.lock = NULL;
 		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
 	}
-	write_sequnlock(&sc->waitlock);
 
 	spin_unlock_irq(&sc->alloc_lock);
 }
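Note: the sc_disable() rework is the usual "detach under the lock, wake outside it" shape, which keeps the wakeup callbacks from running while the wait-list lock is held. A minimal sketch of that pattern using list_splice_init() (the hunk itself moves the list head with list_move(); the structure and names below are illustrative):

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    struct waiter {
    	struct list_head node;
    	struct task_struct *task;
    };

    static void drain_and_wake(spinlock_t *lock, struct list_head *shared)
    {
    	LIST_HEAD(local);
    	struct waiter *w, *tmp;

    	spin_lock(lock);
    	list_splice_init(shared, &local);	/* O(1): take every entry */
    	spin_unlock(lock);

    	/* wakeups run without the list lock, so no ABBA with QP locks */
    	list_for_each_entry_safe(w, tmp, &local, node) {
    		list_del_init(&w->node);
    		wake_up_process(w->task);
    	}
    }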
@@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
 	}
 
 	if (cq_poll_info->ud_vlan_valid) {
-		entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-		entry->wc_flags |= IB_WC_WITH_VLAN;
+		u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
 		entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+		if (vlan) {
+			entry->vlan_id = vlan;
+			entry->wc_flags |= IB_WC_WITH_VLAN;
+		}
 	} else {
 		entry->sl = 0;
 	}
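Note: background for the irdma change above — the 16-bit VLAN TCI packs a 3-bit priority, one DEI bit and a 12-bit VLAN ID, and a VID of 0 means "priority-tagged frame, no VLAN", which is why the new code only reports a VLAN when the masked ID is non-zero. A small decoding sketch using the masks from <linux/if_vlan.h>:

    #include <linux/if_vlan.h>
    #include <linux/printk.h>

    static void decode_tci(u16 tci)
    {
    	u16 vid  = tci & VLAN_VID_MASK;		/* bits 0-11 */
    	u8  prio = tci >> VLAN_PRIO_SHIFT;	/* bits 13-15 */

    	if (vid)
    		pr_info("VLAN %u, prio %u\n", vid, prio);
    	else
    		pr_info("priority-tagged, prio %u, no VLAN\n", prio);
    }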
@@ -330,9 +330,11 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 
 		tc_node->enable = true;
 		ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
-		if (ret)
+		if (ret) {
+			vsi->unregister_qset(vsi, tc_node);
 			goto reg_err;
+		}
 	}
 	ibdev_dbg(to_ibdev(vsi->dev),
 		  "WS: Using node %d which represents VSI %d TC %d\n",
 		  tc_node->index, vsi->vsi_idx, traffic_class);
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 	}
 	goto exit;
 
+reg_err:
+	irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+	list_del(&tc_node->siblings);
+	irdma_free_node(vsi, tc_node);
 leaf_add_err:
 	if (list_empty(&vsi_node->child_list_head)) {
 		if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 exit:
 	mutex_unlock(&vsi->dev->ws_mutex);
 	return ret;
-
-reg_err:
-	mutex_unlock(&vsi->dev->ws_mutex);
-	irdma_ws_remove(vsi, user_pri);
-	return ret;
 }
 
 /**
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
-	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	set_mr_fields(dev, mr, umem->length, access_flags);
 	kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 		ib_umem_release(&odp->umem);
 		return ERR_CAST(mr);
 	}
+	xa_init(&mr->implicit_children);
 
 	odp->private = mr;
 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
 
 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
@@ -455,6 +455,7 @@ struct qedr_qp {
 	/* synchronization objects used with iwarp ep */
 	struct kref refcnt;
 	struct completion iwarp_cm_comp;
+	struct completion qp_rel_comp;
 	unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
 };
 
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
 {
 	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
 
-	kfree(qp);
+	complete(&qp->qp_rel_comp);
 }
 
 static void
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		kref_init(&qp->refcnt);
 		init_completion(&qp->iwarp_cm_comp);
+		init_completion(&qp->qp_rel_comp);
 	}
 
 	qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
 	qedr_free_qp_resources(dev, qp, udata);
 
-	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		qedr_iw_qp_rem_ref(&qp->ibqp);
+		wait_for_completion(&qp->qp_rel_comp);
+	}
 
 	return 0;
 }
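Note: the qedr hunks switch the last-put path from freeing the QP to signalling a completion that qedr_destroy_qp() waits on, so destroy cannot return while references are still live. A minimal sketch of the kref-plus-completion teardown pattern (names illustrative):

    #include <linux/completion.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
    	struct kref ref;
    	struct completion released;
    };

    static void obj_release(struct kref *kref)
    {
    	struct obj *o = container_of(kref, struct obj, ref);

    	/* do not kfree() here: a waiter is still about to touch 'o' */
    	complete(&o->released);
    }

    static void obj_destroy(struct obj *o)
    {
    	kref_put(&o->ref, obj_release);		/* drop our reference */
    	wait_for_completion(&o->released);	/* until the LAST ref is gone */
    	kfree(o);				/* now freeing cannot race */
    }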
@@ -602,7 +602,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
 /*
  * How many pages in this iovec element?
  */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
 {
 	const unsigned long addr = (unsigned long) iov->iov_base;
 	const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 				   struct qib_user_sdma_queue *pq,
 				   struct qib_user_sdma_pkt *pkt,
-				   unsigned long addr, int tlen, int npages)
+				   unsigned long addr, int tlen, size_t npages)
 {
 	struct page *pages[8];
 	int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
 	unsigned long idx;
 
 	for (idx = 0; idx < niov; idx++) {
-		const int npages = qib_user_sdma_num_pages(iov + idx);
+		const size_t npages = qib_user_sdma_num_pages(iov + idx);
 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
 
 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 	unsigned pktnw;
 	unsigned pktnwc;
 	int nfrags = 0;
-	int npages = 0;
-	int bytes_togo = 0;
+	size_t npages = 0;
+	size_t bytes_togo = 0;
 	int tiddma = 0;
 	int cfur;
 
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
 			npages += qib_user_sdma_num_pages(&iov[idx]);
 
-			bytes_togo += slen;
+			if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+			    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
 			pktnwc += slen >> 2;
 			idx++;
 			nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 		}
 
 		if (frag_size) {
-			int tidsmsize, n;
-			size_t pktsize;
+			size_t tidsmsize, n, pktsize, sz, addrlimit;
 
 			n = npages*((2*PAGE_SIZE/frag_size)+1);
 			pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 			else
 				tidsmsize = 0;
 
-			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+			if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+			pkt = kmalloc(sz, GFP_KERNEL);
 			if (!pkt) {
 				ret = -ENOMEM;
 				goto free_pbc;
 			}
 			pkt->largepkt = 1;
 			pkt->frag_size = frag_size;
-			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+			if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+					       &addrlimit) ||
+			    addrlimit > type_max(typeof(pkt->addrlimit))) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+			pkt->addrlimit = addrlimit;
 
 			if (tiddma) {
 				char *tidsm = (char *)pkt + pktsize;
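Note: the qib hunks wrap every size calculation fed by user iovecs in check_add_overflow() and bound the result with type_max() before it lands in a narrower struct field. A minimal sketch of the idiom from <linux/overflow.h> (the u16 field is illustrative):

    #include <linux/overflow.h>

    struct pkt {
    	u16 bytes_togo;		/* narrow field, like the qib counters */
    };

    static int account(struct pkt *p, size_t *total, size_t slen)
    {
    	/*
    	 * check_add_overflow() computes *total + slen, stores the result,
    	 * and returns true if the addition wrapped. The type_max() check
    	 * additionally rejects values that fit in size_t but would be
    	 * truncated when copied into the narrower struct field.
    	 */
    	if (check_add_overflow(*total, slen, total) ||
    	    *total > type_max(typeof(p->bytes_togo)))
    		return -EINVAL;

    	p->bytes_togo = *total;
    	return 0;
    }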
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
 	spin_lock(&rdi->n_qps_lock);
 	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
 		spin_unlock(&rdi->n_qps_lock);
-		ret = ENOMEM;
+		ret = -ENOMEM;
 		goto bail_ip;
 	}
 
@@ -674,7 +674,7 @@ static void imx219_set_default_format(struct imx219 *imx219)
 
 	fmt = &imx219->fmt;
 	fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->colorspace = V4L2_COLORSPACE_RAW;
 	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
 	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
 							  fmt->colorspace,
@@ -844,7 +844,7 @@ static int imx219_enum_frame_size(struct v4l2_subdev *sd,
 
 static void imx219_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
 {
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->colorspace = V4L2_COLORSPACE_RAW;
 	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
 	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
 							  fmt->colorspace,
@@ -885,7 +885,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
 
 	fmt->format.code = imx290->formats[i].code;
 	fmt->format.field = V4L2_FIELD_NONE;
-	fmt->format.colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->format.colorspace = V4L2_COLORSPACE_RAW;
 	fmt->format.ycbcr_enc =
 		V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->format.colorspace);
 	fmt->format.quantization =
@@ -1471,7 +1471,7 @@ static int imx477_enum_frame_size(struct v4l2_subdev *sd,
 
 static void imx477_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
 {
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->colorspace = V4L2_COLORSPACE_RAW;
 	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
 	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
 							  fmt->colorspace,
@@ -1317,7 +1317,7 @@ static int imx519_enum_frame_size(struct v4l2_subdev *sd,
 
 static void imx519_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
 {
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->colorspace = V4L2_COLORSPACE_RAW;
 	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
 	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
 							  fmt->colorspace,
@@ -494,7 +494,7 @@ static const struct ov5647_mode ov5647_modes[] = {
 	{
 		.format = {
 			.code = MEDIA_BUS_FMT_SBGGR10_1X10,
-			.colorspace = V4L2_COLORSPACE_SRGB,
+			.colorspace = V4L2_COLORSPACE_RAW,
 			.field = V4L2_FIELD_NONE,
 			.width = 2592,
 			.height = 1944
@@ -515,7 +515,7 @@ static const struct ov5647_mode ov5647_modes[] = {
 	{
 		.format = {
 			.code = MEDIA_BUS_FMT_SBGGR10_1X10,
-			.colorspace = V4L2_COLORSPACE_SRGB,
+			.colorspace = V4L2_COLORSPACE_RAW,
 			.field = V4L2_FIELD_NONE,
 			.width = 1920,
 			.height = 1080
@@ -536,7 +536,7 @@ static const struct ov5647_mode ov5647_modes[] = {
 	{
 		.format = {
 			.code = MEDIA_BUS_FMT_SBGGR10_1X10,
-			.colorspace = V4L2_COLORSPACE_SRGB,
+			.colorspace = V4L2_COLORSPACE_RAW,
 			.field = V4L2_FIELD_NONE,
 			.width = 1296,
 			.height = 972
@@ -557,7 +557,7 @@ static const struct ov5647_mode ov5647_modes[] = {
 	{
 		.format = {
 			.code = MEDIA_BUS_FMT_SBGGR10_1X10,
-			.colorspace = V4L2_COLORSPACE_SRGB,
+			.colorspace = V4L2_COLORSPACE_RAW,
 			.field = V4L2_FIELD_NONE,
 			.width = 640,
 			.height = 480
@@ -507,7 +507,7 @@ static int ov9281_set_fmt(struct v4l2_subdev *sd,
 		fmt->format.width = mode->width;
 		fmt->format.height = mode->height;
 		fmt->format.field = V4L2_FIELD_NONE;
-		fmt->format.colorspace = V4L2_COLORSPACE_SRGB;
+		fmt->format.colorspace = V4L2_COLORSPACE_RAW;
 		fmt->format.ycbcr_enc =
 			V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->format.colorspace);
 		fmt->format.quantization =
@@ -558,7 +558,7 @@ static int ov9281_get_fmt(struct v4l2_subdev *sd,
 		fmt->format.height = mode->height;
 		fmt->format.code = ov9281->code;
 		fmt->format.field = V4L2_FIELD_NONE;
-		fmt->format.colorspace = V4L2_COLORSPACE_SRGB;
+		fmt->format.colorspace = V4L2_COLORSPACE_RAW;
 		fmt->format.ycbcr_enc =
 			V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->format.colorspace);
 		fmt->format.quantization =
@@ -911,7 +911,7 @@ static int ov9281_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 	try_fmt->height = def_mode->height;
 	try_fmt->code = MEDIA_BUS_FMT_Y10_1X10;
 	try_fmt->field = V4L2_FIELD_NONE;
-	try_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	try_fmt->colorspace = V4L2_COLORSPACE_RAW;
 	try_fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(try_fmt->colorspace);
 	try_fmt->quantization =
 		V4L2_MAP_QUANTIZATION_DEFAULT(true, try_fmt->colorspace,
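Note: every sensor hunk above makes the same one-line change — raw Bayer (or grey) output has had no colour processing, so the drivers now advertise V4L2_COLORSPACE_RAW instead of claiming sRGB, and the V4L2_MAP_*_DEFAULT() macros then derive matching defaults from it. A sketch of the helper each driver effectively open-codes (illustrative, not a shared kernel API):

    #include <linux/videodev2.h>
    #include <media/v4l2-mediabus.h>

    static void raw_fmt_defaults(struct v4l2_mbus_framefmt *fmt)
    {
    	/* unprocessed sensor data: say RAW, not SRGB */
    	fmt->colorspace = V4L2_COLORSPACE_RAW;
    	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
    	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
    							  fmt->colorspace,
    							  fmt->ycbcr_enc);
    }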
@@ -333,26 +333,6 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	struct pmem_device *pmem = pgmap->owner;
-
-	blk_cleanup_disk(pmem->disk);
-}
-
-static void pmem_release_queue(void *pgmap)
-{
-	pmem_pagemap_cleanup(pgmap);
-}
-
-static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	struct request_queue *q =
-		container_of(pgmap->ref, struct request_queue, q_usage_counter);
-
-	blk_freeze_queue_start(q);
-}
-
 static void pmem_release_disk(void *__pmem)
 {
 	struct pmem_device *pmem = __pmem;
@@ -360,12 +340,9 @@ static void pmem_release_disk(void *__pmem)
 	kill_dax(pmem->dax_dev);
 	put_dax(pmem->dax_dev);
 	del_gendisk(pmem->disk);
-}
 
-static const struct dev_pagemap_ops fsdax_pagemap_ops = {
-	.kill = pmem_pagemap_kill,
-	.cleanup = pmem_pagemap_cleanup,
-};
+	blk_cleanup_disk(pmem->disk);
+}
 
 static int pmem_attach_disk(struct device *dev,
 			    struct nd_namespace_common *ndns)
@@ -427,10 +404,8 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->disk = disk;
 	pmem->pgmap.owner = pmem;
 	pmem->pfn_flags = PFN_DEV;
-	pmem->pgmap.ref = &q->q_usage_counter;
 	if (is_nd_pfn(dev)) {
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -444,16 +419,12 @@ static int pmem_attach_disk(struct device *dev,
 		pmem->pgmap.range.end = res->end;
 		pmem->pgmap.nr_range = 1;
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
 		bb_range = pmem->pgmap.range;
 	} else {
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
-		if (devm_add_action_or_reset(dev, pmem_release_queue,
-					&pmem->pgmap))
-			return -ENOMEM;
 		bb_range.start = res->start;
 		bb_range.end = res->end;
 	}
@@ -21,6 +21,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <linux/kmemleak.h>
 
 #include "of_private.h"
 
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 		err = memblock_mark_nomap(base, size);
 		if (err)
 			memblock_free(base, size);
+		kmemleak_ignore_phys(base);
 	}
 
 	return err;
@@ -5,7 +5,6 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -13,7 +12,6 @@
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/platform_device.h>
-#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #define FLAG_BCM4708	BIT(1)
@@ -24,8 +22,7 @@ struct ns_pinctrl {
 	struct device *dev;
 	unsigned int chipset_flag;
 	struct pinctrl_dev *pctldev;
-	struct regmap *regmap;
-	u32 offset;
+	void __iomem *base;
 
 	struct pinctrl_desc pctldesc;
 	struct ns_pinctrl_group *groups;
@@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
 		unset |= BIT(pin_number);
 	}
 
-	regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
+	tmp = readl(ns_pinctrl->base);
 	tmp &= ~unset;
-	regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
+	writel(tmp, ns_pinctrl->base);
 
 	return 0;
 }
@@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
 static int ns_pinctrl_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
 	const struct of_device_id *of_id;
 	struct ns_pinctrl *ns_pinctrl;
 	struct pinctrl_desc *pctldesc;
 	struct pinctrl_pin_desc *pin;
 	struct ns_pinctrl_group *group;
 	struct ns_pinctrl_function *function;
+	struct resource *res;
 	int i;
 
 	ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
@@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
 		return -EINVAL;
 	ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
 
-	ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
-	if (IS_ERR(ns_pinctrl->regmap)) {
-		int err = PTR_ERR(ns_pinctrl->regmap);
-
-		dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
-
-		return err;
-	}
-
-	if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
-		dev_err(dev, "Failed to get register offset\n");
-		return -ENOENT;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "cru_gpio_control");
+	ns_pinctrl->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ns_pinctrl->base)) {
+		dev_err(dev, "Failed to map pinctrl regs\n");
+		return PTR_ERR(ns_pinctrl->base);
 	}
 
 	memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
@@ -840,6 +840,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
 	.pin_config_group_set = amd_pinconf_group_set,
 };
 
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+	unsigned long flags;
+	u32 pin_reg, mask;
+	int i;
+
+	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+		BIT(WAKE_CNTRL_OFF_S4);
+
+	for (i = 0; i < desc->npins; i++) {
+		int pin = desc->pins[i].number;
+		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+		if (!pd)
+			continue;
+
+		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+		pin_reg = readl(gpio_dev->base + i * 4);
+		pin_reg &= ~mask;
+		writel(pin_reg, gpio_dev->base + i * 4);
+
+		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+	}
+}
+
 #ifdef CONFIG_PM_SLEEP
 static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
 {
@@ -976,6 +1004,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
 		return PTR_ERR(gpio_dev->pctrl);
 	}
 
+	/* Disable and mask interrupts */
+	amd_gpio_irq_init(gpio_dev);
+
 	girq = &gpio_dev->gc.irq;
 	girq->chip = &amd_gpio_irqchip;
 	/* This will let us handle the parent IRQ in the driver */
@@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
 	struct stm32_pinctrl_group *g = pctl->groups;
 	int i;
 
-	for (i = g->pin; i < g->pin + pctl->ngroups; i++)
-		stm32_pinctrl_restore_gpio_regs(pctl, i);
+	for (i = 0; i < pctl->ngroups; i++, g++)
+		stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
 
 	return 0;
 }
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
 	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
 	ptp_cleanup_pin_groups(ptp);
+	kfree(ptp->vclock_index);
 	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	/* Create a posix clock and link it to the device. */
 	err = posix_clock_register(&ptp->clock, &ptp->dev);
 	if (err) {
+		if (ptp->pps_source)
+			pps_unregister_source(ptp->pps_source);
+
+		if (ptp->kworker)
+			kthread_destroy_worker(ptp->kworker);
+
+		put_device(&ptp->dev);
+
 		pr_err("failed to create posix clock\n");
-		goto no_clock;
+		return ERR_PTR(err);
 	}
 
 	return ptp;
 
-no_clock:
-	if (ptp->pps_source)
-		pps_unregister_source(ptp->pps_source);
 no_pps:
 	ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	ptp->defunct = 1;
 	wake_up_interruptible(&ptp->tsev_wq);
 
-	kfree(ptp->vclock_index);
-
 	if (ptp->kworker) {
 		kthread_cancel_delayed_work_sync(&ptp->aux_work);
 		kthread_destroy_worker(ptp->kworker);
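Note: the ptp_clock_register() hunk replaces "goto no_clock" with inline cleanup that ends in put_device(). Once an embedded struct device has been initialized, the object may only die through its release callback, so error paths must drop the reference instead of falling into kfree()-style teardown. A minimal sketch of the rule (names illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct clk_obj {
    	struct device dev;	/* refcounted; release() frees us */
    };

    static void clk_obj_release(struct device *dev)
    {
    	kfree(container_of(dev, struct clk_obj, dev));
    }

    static struct clk_obj *clk_obj_create(void)
    {
    	struct clk_obj *c = kzalloc(sizeof(*c), GFP_KERNEL);

    	if (!c)
    		return NULL;
    	device_initialize(&c->dev);
    	c->dev.release = clk_obj_release;

    	if (dev_set_name(&c->dev, "clkobj0")) {
    		/* after device_initialize(), never kfree() directly */
    		put_device(&c->dev);
    		return NULL;
    	}
    	return c;
    }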
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
 
 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
 			     KVM_CLOCK_PAIRING_WALLCLOCK);
-	if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+	if (ret == -KVM_ENOSYS)
 		return -ENODEV;
 
-	return 0;
+	return ret;
 }
 
 int kvm_arch_ptp_get_clock(struct timespec64 *ts)
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 		goto fail;
 	}
 
-	shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
 				   shost->can_queue);
 
 	error = scsi_init_sense_cache(shost);
@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 	spin_lock_irqsave(&evt->queue->l_lock, flags);
 	list_add_tail(&evt->queue_list, &evt->queue->sent);
+	atomic_set(&evt->active, 1);
 
 	mb();
 
@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 				     be64_to_cpu(crq_as_u64[1]));
 
 	if (rc) {
+		atomic_set(&evt->active, 0);
 		list_del(&evt->queue_list);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		del_timer(&evt->timer);
@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 		evt->done(evt);
 	} else {
-		atomic_set(&evt->active, 1);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		ibmvfc_trc_start(evt);
 	}
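Note: the ibmvfc hunks set evt->active before the command is handed to the hypervisor and clear it again if the send fails; previously the flag was only set on success, so a fast completion interrupt could observe active == 0 for a request that really was outstanding. A minimal sketch of claim-before-submit ordering (names illustrative):

    #include <linux/atomic.h>

    struct event {
    	atomic_t active;
    };

    static int submit(struct event *evt, int (*send_hw)(struct event *))
    {
    	int rc;

    	atomic_set(&evt->active, 1);	/* claim before the doorbell rings */
    	smp_mb();			/* flag visible before submission */

    	rc = send_hw(evt);
    	if (rc)
    		atomic_set(&evt->active, 0);	/* never sent: undo the claim */
    	return rc;
    }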
@@ -5065,9 +5065,12 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 
-	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
-		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-			MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+
+	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
+
 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
 			cpu_to_be32(scsi_prot_ref_tag(scmd));
 	}
@@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 				ql_dbg_pci(ql_dbg_init, ha->pdev,
 				    0xe0ee, "%s: failed alloc dsd\n",
 				    __func__);
-				return 1;
+				return -ENOMEM;
 			}
 			ha->dif_bundle_kallocs++;
 
@@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
 			vha->flags.online, qla2x00_reset_active(vha),
 			cmd->reset_count, qpair->chip_reset);
-		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-		return 0;
+		goto out_unmap_unlock;
 	}
 
 	/* Does F/W have an IOCBs for this request */
@@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	prm.sg = NULL;
 	prm.req_cnt = 1;
 
-	/* Calculate number of entries and segments required */
-	if (qlt_pci_map_calc_cnt(&prm) != 0)
-		return -EAGAIN;
-
 	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
 	    (cmd->sess && cmd->sess->deleted)) {
 		/*
@@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 		return 0;
 	}
 
+	/* Calculate number of entries and segments required */
+	if (qlt_pci_map_calc_cnt(&prm) != 0)
+		return -EAGAIN;
+
 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 	/* Does F/W have an IOCBs for this request */
 	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 
 	BUG_ON(cmd->cmd_in_wq);
 
-	if (cmd->sg_mapped)
-		qlt_unmap_sg(cmd->vha, cmd);
-
 	if (!cmd->q_full)
 		qlt_decr_num_pend_cmds(cmd->vha);
 
@@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-	module_put(sdev->host->hostt->module);
+	struct module *mod = sdev->host->hostt->module;
+
 	put_device(&sdev->sdev_gendev);
+	module_put(mod);
 }
 EXPORT_SYMBOL(scsi_device_put);
 
@@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
 	struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
 	unsigned long flags;
+	struct module *mod;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
 
+	mod = sdev->host->hostt->module;
+
 	scsi_dh_release_device(sdev);
 
 	parent = sdev->sdev_gendev.parent;
@@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 
 	if (parent)
 		put_device(parent);
+	module_put(mod);
 }
 
 static void scsi_device_dev_release(struct device *dev)
 {
 	struct scsi_device *sdp = to_scsi_device(dev);
 
+	/* Set module pointer as NULL in case of module unloading */
+	if (!try_module_get(sdp->host->hostt->module))
+		sdp->host->hostt->module = NULL;
+
 	execute_in_process_context(scsi_device_dev_release_usercontext,
 				   &sdp->ew);
 }
|
|||||||
session->recovery_tmo = value;
|
session->recovery_tmo = value;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
err = transport->set_param(conn, ev->u.set_param.param,
|
|
||||||
data, ev->u.set_param.len);
|
|
||||||
if ((conn->state == ISCSI_CONN_BOUND) ||
|
if ((conn->state == ISCSI_CONN_BOUND) ||
|
||||||
(conn->state == ISCSI_CONN_UP)) {
|
(conn->state == ISCSI_CONN_UP)) {
|
||||||
err = transport->set_param(conn, ev->u.set_param.param,
|
err = transport->set_param(conn, ev->u.set_param.param,
|
||||||
|
@ -3683,7 +3683,12 @@ static int sd_resume(struct device *dev)
|
|||||||
static int sd_resume_runtime(struct device *dev)
|
static int sd_resume_runtime(struct device *dev)
|
||||||
{
|
{
|
||||||
struct scsi_disk *sdkp = dev_get_drvdata(dev);
|
struct scsi_disk *sdkp = dev_get_drvdata(dev);
|
||||||
struct scsi_device *sdp = sdkp->device;
|
struct scsi_device *sdp;
|
||||||
|
|
||||||
|
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
sdp = sdkp->device;
|
||||||
|
|
||||||
if (sdp->ignore_media_change) {
|
if (sdp->ignore_media_change) {
|
||||||
/* clear the device's sense data */
|
/* clear the device's sense data */
|
||||||
|
@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
|
|||||||
foreach_vmbus_pkt(desc, channel) {
|
foreach_vmbus_pkt(desc, channel) {
|
||||||
struct vstor_packet *packet = hv_pkt_data(desc);
|
struct vstor_packet *packet = hv_pkt_data(desc);
|
||||||
struct storvsc_cmd_request *request = NULL;
|
struct storvsc_cmd_request *request = NULL;
|
||||||
|
u32 pktlen = hv_pkt_datalen(desc);
|
||||||
u64 rqst_id = desc->trans_id;
|
u64 rqst_id = desc->trans_id;
|
||||||
|
u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
|
||||||
|
stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
|
||||||
|
|
||||||
if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
|
if (pktlen < minlen) {
|
||||||
stor_device->vmscsi_size_delta) {
|
dev_err(&device->device,
|
||||||
dev_err(&device->device, "Invalid packet len\n");
|
"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
|
||||||
|
rqst_id, pktlen, minlen);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
|
|||||||
if (rqst_id == 0) {
|
if (rqst_id == 0) {
|
||||||
/*
|
/*
|
||||||
* storvsc_on_receive() looks at the vstor_packet in the message
|
* storvsc_on_receive() looks at the vstor_packet in the message
|
||||||
* from the ring buffer. If the operation in the vstor_packet is
|
* from the ring buffer.
|
||||||
* COMPLETE_IO, then we call storvsc_on_io_completion(), and
|
*
|
||||||
* dereference the guest memory address. Make sure we don't call
|
* - If the operation in the vstor_packet is COMPLETE_IO, then
|
||||||
* storvsc_on_io_completion() with a guest memory address that is
|
* we call storvsc_on_io_completion(), and dereference the
|
||||||
* zero if Hyper-V were to construct and send such a bogus packet.
|
* guest memory address. Make sure we don't call
|
||||||
|
* storvsc_on_io_completion() with a guest memory address
|
||||||
|
* that is zero if Hyper-V were to construct and send such
|
||||||
|
* a bogus packet.
|
||||||
|
*
|
||||||
|
* - If the operation in the vstor_packet is FCHBA_DATA, then
|
||||||
|
* we call cache_wwn(), and access the data payload area of
|
||||||
|
* the packet (wwn_packet); however, there is no guarantee
|
||||||
|
* that the packet is big enough to contain such area.
|
||||||
|
* Future-proof the code by rejecting such a bogus packet.
|
||||||
*/
|
*/
|
||||||
if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
|
if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
|
||||||
|
packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
|
||||||
dev_err(&device->device, "Invalid packet with ID of 0\n");
|
dev_err(&device->device, "Invalid packet with ID of 0\n");
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
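Note: the storvsc hunks compute the minimum valid length for each class of ring-buffer packet up front and reject anything shorter before any field is read. A minimal sketch of validate-before-parse for a length-checked message (types illustrative):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct msg {
    	u32 op;
    	u8 payload[60];
    };

    static int handle_msg(const struct msg *m, u32 len, bool expect_payload)
    {
    	/* header-only messages may be short; payload-bearing ones may not */
    	u32 minlen = expect_payload ? sizeof(struct msg) : sizeof(m->op);

    	if (len < minlen)
    		return -EINVAL;	/* reject before dereferencing any field */

    	/* only now is it safe to look at m->op / m->payload */
    	return 0;
    }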
@@ -642,9 +642,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
 	}
 
 	/* setting for three timeout values for traffic class #0 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
 
 	return 0;
 out:
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
 
 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-	/*
-	 * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
-	 * address registers must be restored because the restore kernel can
-	 * have used different addresses.
-	 */
-	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
-		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
-	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
-		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
-	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
-		      REG_UTP_TASK_REQ_LIST_BASE_L);
-	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
-		      REG_UTP_TASK_REQ_LIST_BASE_H);
-
 	if (ufshcd_is_link_hibern8(hba)) {
 		int ret = ufshcd_uic_hibern8_exit(hba);
 
|
|||||||
.device_reset = ufs_intel_device_reset,
|
.device_reset = ufs_intel_device_reset,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#ifdef CONFIG_PM_SLEEP
|
||||||
|
static int ufshcd_pci_restore(struct device *dev)
|
||||||
|
{
|
||||||
|
struct ufs_hba *hba = dev_get_drvdata(dev);
|
||||||
|
|
||||||
|
/* Force a full reset and restore */
|
||||||
|
ufshcd_set_link_off(hba);
|
||||||
|
|
||||||
|
return ufshcd_system_resume(dev);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ufshcd_pci_shutdown - main function to put the controller in reset state
|
* ufshcd_pci_shutdown - main function to put the controller in reset state
|
||||||
* @pdev: pointer to PCI device handle
|
* @pdev: pointer to PCI device handle
|
||||||
@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
|
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
|
||||||
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
|
|
||||||
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
|
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
|
||||||
#ifdef CONFIG_PM_SLEEP
|
#ifdef CONFIG_PM_SLEEP
|
||||||
|
.suspend = ufshcd_system_suspend,
|
||||||
|
.resume = ufshcd_system_resume,
|
||||||
|
.freeze = ufshcd_system_suspend,
|
||||||
|
.thaw = ufshcd_system_resume,
|
||||||
|
.poweroff = ufshcd_system_suspend,
|
||||||
|
.restore = ufshcd_pci_restore,
|
||||||
.prepare = ufshcd_suspend_prepare,
|
.prepare = ufshcd_suspend_prepare,
|
||||||
.complete = ufshcd_resume_complete,
|
.complete = ufshcd_resume_complete,
|
||||||
#endif
|
#endif
|
||||||
|
@@ -2737,12 +2737,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	lrbp->req_abort_skip = false;
 
-	err = ufshpb_prep(hba, lrbp);
-	if (err == -EAGAIN) {
-		lrbp->cmd = NULL;
-		ufshcd_release(hba);
-		goto out;
-	}
+	ufshpb_prep(hba, lrbp);
 
 	ufshcd_comp_scsi_upiu(hba, lrbp);
 
@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
 	return transfer_len <= hpb->pre_req_max_tr_len;
 }
 
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
-	return len > hpb->pre_req_min_tr_len &&
-	       len <= hpb->pre_req_max_tr_len;
-}
-
 static bool ufshpb_is_general_lun(int lun)
 {
 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
|
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
|
||||||
__be64 ppn, u8 transfer_len, int read_id)
|
__be64 ppn, u8 transfer_len)
|
||||||
{
|
{
|
||||||
unsigned char *cdb = lrbp->cmd->cmnd;
|
unsigned char *cdb = lrbp->cmd->cmnd;
|
||||||
__be64 ppn_tmp = ppn;
|
__be64 ppn_tmp = ppn;
|
||||||
@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
|
|||||||
/* ppn value is stored as big-endian in the host memory */
|
/* ppn value is stored as big-endian in the host memory */
|
||||||
memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
|
memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
|
||||||
cdb[14] = transfer_len;
|
cdb[14] = transfer_len;
|
||||||
cdb[15] = read_id;
|
cdb[15] = 0;
|
||||||
|
|
||||||
lrbp->cmd->cmd_len = UFS_CDB_SIZE;
|
lrbp->cmd->cmd_len = UFS_CDB_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
|
|
||||||
unsigned long lpn, unsigned int len,
|
|
||||||
int read_id)
|
|
||||||
{
|
|
||||||
cdb[0] = UFSHPB_WRITE_BUFFER;
|
|
||||||
cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
|
|
||||||
|
|
||||||
put_unaligned_be32(lpn, &cdb[2]);
|
|
||||||
cdb[6] = read_id;
|
|
||||||
put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
|
|
||||||
|
|
||||||
cdb[9] = 0x00; /* Control = 0x00 */
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
|
|
||||||
{
|
|
||||||
struct ufshpb_req *pre_req;
|
|
||||||
|
|
||||||
if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
|
|
||||||
dev_info(&hpb->sdev_ufs_lu->sdev_dev,
|
|
||||||
"pre_req throttle. inflight %d throttle %d",
|
|
||||||
hpb->num_inflight_pre_req, hpb->throttle_pre_req);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
|
|
||||||
struct ufshpb_req, list_req);
|
|
||||||
if (!pre_req) {
|
|
||||||
dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
list_del_init(&pre_req->list_req);
|
|
||||||
hpb->num_inflight_pre_req++;
|
|
||||||
|
|
||||||
return pre_req;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
|
|
||||||
struct ufshpb_req *pre_req)
|
|
||||||
{
|
|
||||||
pre_req->req = NULL;
|
|
||||||
bio_reset(pre_req->bio);
|
|
||||||
list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
|
|
||||||
hpb->num_inflight_pre_req--;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
|
|
||||||
{
|
|
||||||
struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
|
|
||||||
struct ufshpb_lu *hpb = pre_req->hpb;
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (error) {
|
|
||||||
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
|
|
||||||
struct scsi_sense_hdr sshdr;
|
|
||||||
|
|
||||||
dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
|
|
||||||
scsi_command_normalize_sense(cmd, &sshdr);
|
|
||||||
dev_err(&hpb->sdev_ufs_lu->sdev_dev,
|
|
||||||
"code %x sense_key %x asc %x ascq %x",
|
|
||||||
sshdr.response_code,
|
|
||||||
sshdr.sense_key, sshdr.asc, sshdr.ascq);
|
|
||||||
dev_err(&hpb->sdev_ufs_lu->sdev_dev,
|
|
||||||
"byte4 %x byte5 %x byte6 %x additional_len %x",
|
|
||||||
sshdr.byte4, sshdr.byte5,
|
|
||||||
sshdr.byte6, sshdr.additional_length);
|
|
||||||
}
|
|
||||||
|
|
||||||
blk_mq_free_request(req);
|
|
||||||
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
|
|
||||||
ufshpb_put_pre_req(pre_req->hpb, pre_req);
|
|
||||||
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
|
|
||||||
{
|
|
||||||
struct ufshpb_lu *hpb = pre_req->hpb;
|
|
||||||
struct ufshpb_region *rgn;
|
|
||||||
struct ufshpb_subregion *srgn;
|
|
||||||
__be64 *addr;
|
|
||||||
int offset = 0;
|
|
||||||
int copied;
|
|
||||||
unsigned long lpn = pre_req->wb.lpn;
|
|
||||||
int rgn_idx, srgn_idx, srgn_offset;
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
addr = page_address(page);
|
|
||||||
ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
|
|
||||||
|
|
||||||
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
|
|
||||||
|
|
||||||
next_offset:
|
|
||||||
rgn = hpb->rgn_tbl + rgn_idx;
|
|
||||||
srgn = rgn->srgn_tbl + srgn_idx;
|
|
||||||
|
|
||||||
if (!ufshpb_is_valid_srgn(rgn, srgn))
|
|
||||||
goto mctx_error;
|
|
||||||
|
|
||||||
if (!srgn->mctx)
|
|
||||||
goto mctx_error;
|
|
||||||
|
|
||||||
copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
|
|
||||||
pre_req->wb.len - offset,
|
|
||||||
&addr[offset]);
|
|
||||||
|
|
||||||
if (copied < 0)
|
|
||||||
goto mctx_error;
|
|
||||||
|
|
||||||
offset += copied;
|
|
||||||
srgn_offset += copied;
|
|
||||||
|
|
||||||
if (srgn_offset == hpb->entries_per_srgn) {
|
|
||||||
srgn_offset = 0;
|
|
||||||
|
|
||||||
if (++srgn_idx == hpb->srgns_per_rgn) {
|
|
||||||
srgn_idx = 0;
|
|
||||||
rgn_idx++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (offset < pre_req->wb.len)
|
|
||||||
goto next_offset;
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
|
|
||||||
return 0;
|
|
||||||
mctx_error:
|
|
||||||
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
|
|
||||||
struct request_queue *q,
|
|
||||||
struct ufshpb_req *pre_req)
|
|
||||||
{
|
|
||||||
struct page *page = pre_req->wb.m_page;
|
|
||||||
struct bio *bio = pre_req->bio;
|
|
||||||
int entries_bytes, ret;
|
|
||||||
|
|
||||||
if (!page)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (ufshpb_prep_entry(pre_req, page))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
entries_bytes = pre_req->wb.len * sizeof(__be64);
|
|
||||||
|
|
||||||
ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
|
|
||||||
if (ret != entries_bytes) {
|
|
||||||
dev_err(&hpb->sdev_ufs_lu->sdev_dev,
|
|
||||||
"bio_add_pc_page fail: %d", ret);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
|
|
||||||
{
|
|
||||||
if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
|
|
||||||
hpb->cur_read_id = 1;
|
|
||||||
return hpb->cur_read_id;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
|
|
||||||
struct ufshpb_req *pre_req, int read_id)
|
|
||||||
{
|
|
||||||
struct scsi_device *sdev = cmd->device;
|
|
||||||
struct request_queue *q = sdev->request_queue;
|
|
||||||
struct request *req;
|
|
||||||
struct scsi_request *rq;
|
|
||||||
struct bio *bio = pre_req->bio;
|
|
||||||
|
|
||||||
pre_req->hpb = hpb;
|
|
||||||
pre_req->wb.lpn = sectors_to_logical(cmd->device,
|
|
||||||
blk_rq_pos(scsi_cmd_to_rq(cmd)));
|
|
||||||
pre_req->wb.len = sectors_to_logical(cmd->device,
|
|
||||||
blk_rq_sectors(scsi_cmd_to_rq(cmd)));
|
|
||||||
if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
req = pre_req->req;
|
|
||||||
|
|
||||||
/* 1. request setup */
|
|
||||||
blk_rq_append_bio(req, bio);
|
|
||||||
req->rq_disk = NULL;
|
|
||||||
req->end_io_data = (void *)pre_req;
|
|
||||||
req->end_io = ufshpb_pre_req_compl_fn;
|
|
||||||
|
|
||||||
/* 2. scsi_request setup */
|
|
||||||
rq = scsi_req(req);
|
|
||||||
rq->retries = 1;
|
|
||||||
|
|
||||||
ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
|
|
||||||
read_id);
|
|
||||||
rq->cmd_len = scsi_command_size(rq->cmd);
|
|
||||||
|
|
||||||
if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
|
|
||||||
return -EAGAIN;
|
|
||||||
|
|
||||||
hpb->stats.pre_req_cnt++;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
|
|
||||||
int *read_id)
|
|
||||||
{
|
|
||||||
struct ufshpb_req *pre_req;
|
|
||||||
struct request *req = NULL;
|
|
||||||
unsigned long flags;
|
|
||||||
int _read_id;
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
req = blk_get_request(cmd->device->request_queue,
|
|
||||||
REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
|
|
||||||
if (IS_ERR(req))
|
|
||||||
return -EAGAIN;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
|
|
||||||
pre_req = ufshpb_get_pre_req(hpb);
|
|
||||||
if (!pre_req) {
|
|
||||||
ret = -EAGAIN;
|
|
||||||
goto unlock_out;
|
|
||||||
}
|
|
||||||
_read_id = ufshpb_get_read_id(hpb);
|
|
||||||
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
|
|
||||||
|
|
||||||
pre_req->req = req;
|
|
||||||
|
|
||||||
ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
|
|
||||||
if (ret)
|
|
||||||
goto free_pre_req;
|
|
||||||
|
|
||||||
*read_id = _read_id;
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
free_pre_req:
|
|
||||||
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
|
|
||||||
ufshpb_put_pre_req(hpb, pre_req);
|
|
||||||
unlock_out:
|
|
||||||
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
|
|
||||||
blk_put_request(req);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
 /*
  * This function will set up HPB read command using host-side L2P map data.
  */
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
     __be64 ppn;
     unsigned long flags;
    int transfer_len, rgn_idx, srgn_idx, srgn_offset;
-    int read_id = 0;
     int err = 0;

     hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
         dev_err(hba->dev, "get ppn failed. err %d\n", err);
         return err;
     }
-    if (!ufshpb_is_legacy(hba) &&
-        ufshpb_is_required_wb(hpb, transfer_len)) {
-        err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
-        if (err) {
-            unsigned long timeout;
-
-            timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
-                      hpb->params.requeue_timeout_ms);
-
-            if (time_before(jiffies, timeout))
-                return -EAGAIN;
-
-            hpb->stats.miss_cnt++;
-            return 0;
-        }
-    }
-
-    ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+    ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);

     hpb->stats.hit_cnt++;
     return 0;
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
     u32 entries_per_rgn;
     u64 rgn_mem_size, tmp;

-    /* for pre_req */
-    hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
     if (ufshpb_is_legacy(hba))
         hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
     else
         hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;

-    hpb->cur_read_id = 0;
-
     hpb->lu_pinned_start = hpb_lu_info->pinned_start;
     hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
         (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
@@ -241,8 +241,6 @@ struct ufshpb_lu {
     spinlock_t param_lock;

     struct list_head lh_pre_req_free;
-    int cur_read_id;
-    int pre_req_min_tr_len;
     int pre_req_max_tr_len;

     /* cached L2P map management worker */
@@ -134,7 +134,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
     if (!master)
         return -ENOMEM;

-    master->bus_num = dfl_dev->id;
+    master->bus_num = -1;

     hw = spi_master_get_devdata(master);

@@ -48,7 +48,7 @@ static int altera_spi_probe(struct platform_device *pdev)
         return err;

     /* setup the master state. */
-    master->bus_num = pdev->id;
+    master->bus_num = -1;

     if (pdata) {
         if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
@@ -1716,7 +1716,7 @@ static int verify_controller_parameters(struct pl022 *pl022,
             return -EINVAL;
         }
     } else {
-        if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+        if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
             dev_err(&pl022->adev->dev,
                 "Microwire half duplex mode requested,"
                 " but this is only available in the"
@@ -1724,6 +1724,7 @@ static int verify_controller_parameters(struct pl022 *pl022,
             return -EINVAL;
         }
     }
+    }
     return 0;
 }

@@ -1194,7 +1194,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
     return 0;
 }

-static int tegra_slink_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
 {
     struct spi_master *master = dev_get_drvdata(dev);
     struct tegra_slink_data *tspi = spi_master_get_devdata(master);
@@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
     /* Last one doesn't continue. */
     desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
     if (!indirect && vq->use_dma_api)
-        vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+        vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
             ~VRING_DESC_F_NEXT;

     if (indirect) {
@@ -358,7 +358,7 @@ int autofs_wait(struct autofs_sb_info *sbi,
         qstr.len = strlen(p);
         offset = p - name;
     }
-    qstr.hash = full_name_hash(dentry, name, qstr.len);
+    qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);

     if (mutex_lock_interruptible(&sbi->wq_mutex)) {
         kfree(name);
@@ -172,9 +172,10 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
     /* Hash through the page sector by sector */
     for (pg_offset = 0; pg_offset < bytes_left;
          pg_offset += sectorsize) {
-        kaddr = page_address(page);
+        kaddr = kmap_atomic(page);
         crypto_shash_digest(shash, kaddr + pg_offset,
                             sectorsize, csum);
+        kunmap_atomic(kaddr);

         if (memcmp(&csum, cb_sum, csum_size) != 0) {
             btrfs_print_data_csum_error(inode, disk_start,
@@ -287,8 +287,9 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
         cur_size = min_t(unsigned long, compressed_size,
                          PAGE_SIZE);

-        kaddr = page_address(cpage);
+        kaddr = kmap_atomic(cpage);
         write_extent_buffer(leaf, kaddr, ptr, cur_size);
+        kunmap_atomic(kaddr);

         i++;
         ptr += cur_size;
@@ -141,7 +141,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
     *total_in = 0;

     in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-    data_in = page_address(in_page);
+    data_in = kmap(in_page);

     /*
      * store the size of all chunks of compressed data in
@@ -152,7 +152,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
         ret = -ENOMEM;
         goto out;
     }
-    cpage_out = page_address(out_page);
+    cpage_out = kmap(out_page);
     out_offset = LZO_LEN;
     tot_out = LZO_LEN;
     pages[0] = out_page;
@@ -210,6 +210,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
             if (out_len == 0 && tot_in >= len)
                 break;

+            kunmap(out_page);
             if (nr_pages == nr_dest_pages) {
                 out_page = NULL;
                 ret = -E2BIG;
@@ -221,7 +222,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                 ret = -ENOMEM;
                 goto out;
             }
-            cpage_out = page_address(out_page);
+            cpage_out = kmap(out_page);
             pages[nr_pages++] = out_page;

             pg_bytes_left = PAGE_SIZE;
@@ -243,11 +244,12 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
             break;

         bytes_left = len - tot_in;
+        kunmap(in_page);
         put_page(in_page);

         start += PAGE_SIZE;
         in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-        data_in = page_address(in_page);
+        data_in = kmap(in_page);
         in_len = min(bytes_left, PAGE_SIZE);
     }

@@ -257,17 +259,22 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
     }

     /* store the size of all chunks of compressed data */
-    sizes_ptr = page_address(pages[0]);
+    sizes_ptr = kmap_local_page(pages[0]);
     write_compress_length(sizes_ptr, tot_out);
+    kunmap_local(sizes_ptr);

     ret = 0;
     *total_out = tot_out;
     *total_in = tot_in;
 out:
     *out_pages = nr_pages;
-    if (in_page)
+    if (out_page)
+        kunmap(out_page);

+    if (in_page) {
+        kunmap(in_page);
         put_page(in_page);
+    }

     return ret;
 }
@@ -283,6 +290,7 @@ static void copy_compressed_segment(struct compressed_bio *cb,
     u32 orig_in = *cur_in;

     while (*cur_in < orig_in + len) {
+        char *kaddr;
         struct page *cur_page;
         u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
                              orig_in + len - *cur_in);
@@ -290,9 +298,11 @@ static void copy_compressed_segment(struct compressed_bio *cb,
         ASSERT(copy_len);
         cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

+        kaddr = kmap(cur_page);
         memcpy(dest + *cur_in - orig_in,
-               page_address(cur_page) + offset_in_page(*cur_in),
+               kaddr + offset_in_page(*cur_in),
                copy_len);
+        kunmap(cur_page);

         *cur_in += copy_len;
     }
@@ -303,6 +313,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     struct workspace *workspace = list_entry(ws, struct workspace, list);
     const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
     const u32 sectorsize = fs_info->sectorsize;
+    char *kaddr;
     int ret;
     /* Compressed data length, can be unaligned */
     u32 len_in;
@@ -311,7 +322,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     /* Bytes decompressed so far */
     u32 cur_out = 0;

-    len_in = read_compress_length(page_address(cb->compressed_pages[0]));
+    kaddr = kmap(cb->compressed_pages[0]);
+    len_in = read_compress_length(kaddr);
+    kunmap(cb->compressed_pages[0]);
     cur_in += LZO_LEN;

     /*
@@ -344,9 +357,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
         ASSERT(cur_in / sectorsize ==
                (cur_in + LZO_LEN - 1) / sectorsize);
         cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
+        kaddr = kmap(cur_page);
         ASSERT(cur_page);
-        seg_len = read_compress_length(page_address(cur_page) +
-                                       offset_in_page(cur_in));
+        seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
         cur_in += LZO_LEN;

         /* Copy the compressed segment payload into workspace */
@@ -431,7 +444,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
     destlen = min_t(unsigned long, destlen, PAGE_SIZE);
     bytes = min_t(unsigned long, destlen, out_len - start_byte);

-    kaddr = page_address(dest_page);
+    kaddr = kmap_local_page(dest_page);
     memcpy(kaddr, workspace->buf + start_byte, bytes);

     /*
@@ -441,6 +454,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
      */
     if (bytes < destlen)
         memset(kaddr+bytes, 0, destlen-bytes);
+    kunmap_local(kaddr);
 out:
     return ret;
 }
@@ -126,7 +126,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
         ret = -ENOMEM;
         goto out;
     }
-    cpage_out = page_address(out_page);
+    cpage_out = kmap(out_page);
     pages[0] = out_page;
     nr_pages = 1;

@@ -148,22 +148,26 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
             int i;

             for (i = 0; i < in_buf_pages; i++) {
-                if (in_page)
+                if (in_page) {
+                    kunmap(in_page);
                     put_page(in_page);
+                }
                 in_page = find_get_page(mapping,
                                         start >> PAGE_SHIFT);
-                data_in = page_address(in_page);
+                data_in = kmap(in_page);
                 memcpy(workspace->buf + i * PAGE_SIZE,
                        data_in, PAGE_SIZE);
                 start += PAGE_SIZE;
             }
             workspace->strm.next_in = workspace->buf;
         } else {
-            if (in_page)
+            if (in_page) {
+                kunmap(in_page);
                 put_page(in_page);
+            }
             in_page = find_get_page(mapping,
                                     start >> PAGE_SHIFT);
-            data_in = page_address(in_page);
+            data_in = kmap(in_page);
             start += PAGE_SIZE;
             workspace->strm.next_in = data_in;
         }
@@ -192,6 +196,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
          * the stream end if required
          */
         if (workspace->strm.avail_out == 0) {
+            kunmap(out_page);
             if (nr_pages == nr_dest_pages) {
                 out_page = NULL;
                 ret = -E2BIG;
@@ -202,7 +207,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                 ret = -ENOMEM;
                 goto out;
             }
-            cpage_out = page_address(out_page);
+            cpage_out = kmap(out_page);
             pages[nr_pages] = out_page;
             nr_pages++;
             workspace->strm.avail_out = PAGE_SIZE;
@@ -229,6 +234,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
             goto out;
         } else if (workspace->strm.avail_out == 0) {
             /* get another page for the stream end */
+            kunmap(out_page);
             if (nr_pages == nr_dest_pages) {
                 out_page = NULL;
                 ret = -E2BIG;
@@ -239,7 +245,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                 ret = -ENOMEM;
                 goto out;
             }
-            cpage_out = page_address(out_page);
+            cpage_out = kmap(out_page);
             pages[nr_pages] = out_page;
             nr_pages++;
             workspace->strm.avail_out = PAGE_SIZE;
@@ -258,8 +264,13 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
     *total_in = workspace->strm.total_in;
 out:
     *out_pages = nr_pages;
-    if (in_page)
+    if (out_page)
+        kunmap(out_page);

+    if (in_page) {
+        kunmap(in_page);
         put_page(in_page);
+    }
     return ret;
 }

@@ -276,7 +287,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     unsigned long buf_start;
     struct page **pages_in = cb->compressed_pages;

-    data_in = page_address(pages_in[page_in_index]);
+    data_in = kmap(pages_in[page_in_index]);
     workspace->strm.next_in = data_in;
     workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
     workspace->strm.total_in = 0;
@@ -298,6 +309,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)

     if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
         pr_warn("BTRFS: inflateInit failed\n");
+        kunmap(pages_in[page_in_index]);
         return -EIO;
     }
     while (workspace->strm.total_in < srclen) {
@@ -324,13 +336,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)

         if (workspace->strm.avail_in == 0) {
             unsigned long tmp;
+            kunmap(pages_in[page_in_index]);
             page_in_index++;
             if (page_in_index >= total_pages_in) {
                 data_in = NULL;
                 break;
             }
-            data_in = page_address(pages_in[page_in_index]);
+            data_in = kmap(pages_in[page_in_index]);
             workspace->strm.next_in = data_in;
             tmp = srclen - workspace->strm.total_in;
             workspace->strm.avail_in = min(tmp, PAGE_SIZE);
@@ -342,6 +354,8 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     ret = 0;
 done:
     zlib_inflateEnd(&workspace->strm);
+    if (data_in)
+        kunmap(pages_in[page_in_index]);
     if (!ret)
         zero_fill_bio(cb->orig_bio);
     return ret;
@@ -399,7 +399,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

     /* map in the first page of input data */
     in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-    workspace->in_buf.src = page_address(in_page);
+    workspace->in_buf.src = kmap(in_page);
     workspace->in_buf.pos = 0;
     workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

@@ -411,7 +411,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
         goto out;
     }
     pages[nr_pages++] = out_page;
-    workspace->out_buf.dst = page_address(out_page);
+    workspace->out_buf.dst = kmap(out_page);
     workspace->out_buf.pos = 0;
     workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

@@ -446,6 +446,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
         if (workspace->out_buf.pos == workspace->out_buf.size) {
             tot_out += PAGE_SIZE;
             max_out -= PAGE_SIZE;
+            kunmap(out_page);
             if (nr_pages == nr_dest_pages) {
                 out_page = NULL;
                 ret = -E2BIG;
@@ -457,7 +458,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
                 goto out;
             }
             pages[nr_pages++] = out_page;
-            workspace->out_buf.dst = page_address(out_page);
+            workspace->out_buf.dst = kmap(out_page);
             workspace->out_buf.pos = 0;
             workspace->out_buf.size = min_t(size_t, max_out,
                                             PAGE_SIZE);
@@ -472,12 +473,13 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
         /* Check if we need more input */
         if (workspace->in_buf.pos == workspace->in_buf.size) {
             tot_in += PAGE_SIZE;
+            kunmap(in_page);
             put_page(in_page);

             start += PAGE_SIZE;
             len -= PAGE_SIZE;
             in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-            workspace->in_buf.src = page_address(in_page);
+            workspace->in_buf.src = kmap(in_page);
             workspace->in_buf.pos = 0;
             workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
         }
@@ -504,6 +506,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

         tot_out += PAGE_SIZE;
         max_out -= PAGE_SIZE;
+        kunmap(out_page);
         if (nr_pages == nr_dest_pages) {
             out_page = NULL;
             ret = -E2BIG;
@@ -515,7 +518,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
             goto out;
         }
         pages[nr_pages++] = out_page;
-        workspace->out_buf.dst = page_address(out_page);
+        workspace->out_buf.dst = kmap(out_page);
         workspace->out_buf.pos = 0;
         workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
     }
@@ -531,8 +534,12 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 out:
     *out_pages = nr_pages;
     /* Cleanup */
-    if (in_page)
+    if (in_page) {
+        kunmap(in_page);
         put_page(in_page);
+    }
+    if (out_page)
+        kunmap(out_page);
     return ret;
 }

@@ -556,7 +563,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
         goto done;
     }

-    workspace->in_buf.src = page_address(pages_in[page_in_index]);
+    workspace->in_buf.src = kmap(pages_in[page_in_index]);
     workspace->in_buf.pos = 0;
     workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

@@ -592,14 +599,14 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
             break;

         if (workspace->in_buf.pos == workspace->in_buf.size) {
-            page_in_index++;
+            kunmap(pages_in[page_in_index++]);
             if (page_in_index >= total_pages_in) {
                 workspace->in_buf.src = NULL;
                 ret = -EIO;
                 goto done;
             }
             srclen -= PAGE_SIZE;
-            workspace->in_buf.src = page_address(pages_in[page_in_index]);
+            workspace->in_buf.src = kmap(pages_in[page_in_index]);
             workspace->in_buf.pos = 0;
             workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
         }
@@ -607,6 +614,8 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     ret = 0;
     zero_fill_bio(cb->orig_bio);
 done:
+    if (workspace->in_buf.src)
+        kunmap(pages_in[page_in_index]);
     return ret;
 }

@@ -2330,7 +2330,6 @@ static int unsafe_request_wait(struct inode *inode)

 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-    struct ceph_file_info *fi = file->private_data;
     struct inode *inode = file->f_mapping->host;
     struct ceph_inode_info *ci = ceph_inode(inode);
     u64 flush_tid;
@@ -2365,14 +2364,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
     if (err < 0)
         ret = err;

-    if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
-        spin_lock(&file->f_lock);
-        err = errseq_check_and_advance(&ci->i_meta_err,
-                                       &fi->meta_err);
-        spin_unlock(&file->f_lock);
-        if (err < 0)
-            ret = err;
-    }
+    err = file_check_and_advance_wb_err(file);
+    if (err < 0)
+        ret = err;
 out:
     dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
     return ret;
@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,

     spin_lock_init(&fi->rw_contexts_lock);
     INIT_LIST_HEAD(&fi->rw_contexts);
-    fi->meta_err = errseq_sample(&ci->i_meta_err);
     fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

     return 0;
@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)

     ceph_fscache_inode_init(ci);

-    ci->i_meta_err = 0;
-
     return &ci->vfs_inode;
 }

@@ -1493,7 +1493,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
 {
     struct ceph_mds_request *req;
     struct rb_node *p;
-    struct ceph_inode_info *ci;

     dout("cleanup_session_requests mds%d\n", session->s_mds);
     mutex_lock(&mdsc->mutex);
@@ -1502,16 +1501,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
                                struct ceph_mds_request, r_unsafe_item);
         pr_warn_ratelimited(" dropping unsafe request %llu\n",
                             req->r_tid);
-        if (req->r_target_inode) {
-            /* dropping unsafe change of inode's attributes */
-            ci = ceph_inode(req->r_target_inode);
-            errseq_set(&ci->i_meta_err, -EIO);
-        }
-        if (req->r_unsafe_dir) {
-            /* dropping unsafe directory operation */
-            ci = ceph_inode(req->r_unsafe_dir);
-            errseq_set(&ci->i_meta_err, -EIO);
-        }
+        if (req->r_target_inode)
+            mapping_set_error(req->r_target_inode->i_mapping, -EIO);
+        if (req->r_unsafe_dir)
+            mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
         __unregister_request(mdsc, req);
     }
     /* zero r_attempts, so kick_requests() will re-send requests */
@@ -1678,7 +1671,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
         spin_unlock(&mdsc->cap_dirty_lock);

     if (dirty_dropped) {
-        errseq_set(&ci->i_meta_err, -EIO);
+        mapping_set_error(inode->i_mapping, -EIO);

         if (ci->i_wrbuffer_ref_head == 0 &&
             ci->i_wr_ref == 0 &&
@@ -1002,16 +1002,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
     struct ceph_fs_client *new = fc->s_fs_info;
     struct ceph_mount_options *fsopt = new->mount_options;
     struct ceph_options *opt = new->client->options;
-    struct ceph_fs_client *other = ceph_sb_to_client(sb);
+    struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

     dout("ceph_compare_super %p\n", sb);

-    if (compare_mount_options(fsopt, opt, other)) {
+    if (compare_mount_options(fsopt, opt, fsc)) {
         dout("monitor(s)/mount options don't match\n");
         return 0;
     }
     if ((opt->flags & CEPH_OPT_FSID) &&
-        ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+        ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
         dout("fsid doesn't match\n");
         return 0;
     }
@@ -1019,6 +1019,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
         dout("flags differ\n");
         return 0;
     }

+    if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
+        dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+        return 0;
+    }
+
+    if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
+        dout("client has been forcibly unmounted\n");
+        return 0;
+    }
+
     return 1;
 }

@@ -429,8 +429,6 @@ struct ceph_inode_info {
 #ifdef CONFIG_CEPH_FSCACHE
     struct fscache_cookie *fscache;
 #endif
-    errseq_t i_meta_err;

     struct inode vfs_inode; /* at end */
 };

@@ -774,7 +772,6 @@ struct ceph_file_info {
     spinlock_t rw_contexts_lock;
     struct list_head rw_contexts;

-    errseq_t meta_err;
     u32 filp_gen;
     atomic_t num_locks;
 };
@@ -1121,6 +1121,9 @@ int fuse_init_fs_context_submount(struct fs_context *fsc);
  */
 void fuse_conn_destroy(struct fuse_mount *fm);

+/* Drop the connection and free the fuse mount */
+void fuse_mount_destroy(struct fuse_mount *fm);
+
 /**
  * Add connection to control filesystem
  */
@@ -457,14 +457,6 @@ static void fuse_send_destroy(struct fuse_mount *fm)
     }
 }

-static void fuse_put_super(struct super_block *sb)
-{
-    struct fuse_mount *fm = get_fuse_mount_super(sb);
-
-    fuse_conn_put(fm->fc);
-    kfree(fm);
-}
-
 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
 {
     stbuf->f_type = FUSE_SUPER_MAGIC;
@@ -1003,7 +995,6 @@ static const struct super_operations fuse_super_operations = {
     .evict_inode = fuse_evict_inode,
     .write_inode = fuse_write_inode,
     .drop_inode = generic_delete_inode,
-    .put_super = fuse_put_super,
     .umount_begin = fuse_umount_begin,
     .statfs = fuse_statfs,
     .sync_fs = fuse_sync_fs,
@@ -1424,20 +1415,17 @@ static int fuse_get_tree_submount(struct fs_context *fsc)
     if (!fm)
         return -ENOMEM;

+    fm->fc = fuse_conn_get(fc);
     fsc->s_fs_info = fm;
     sb = sget_fc(fsc, NULL, set_anon_super_fc);
-    if (IS_ERR(sb)) {
-        kfree(fm);
+    if (fsc->s_fs_info)
+        fuse_mount_destroy(fm);
+    if (IS_ERR(sb))
         return PTR_ERR(sb);
-    }
-    fm->fc = fuse_conn_get(fc);

     /* Initialize superblock, making @mp_fi its root */
     err = fuse_fill_super_submount(sb, mp_fi);
     if (err) {
-        fuse_conn_put(fc);
-        kfree(fm);
-        sb->s_fs_info = NULL;
         deactivate_locked_super(sb);
         return err;
     }
@@ -1569,8 +1557,6 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
 {
     struct fuse_fs_context *ctx = fsc->fs_private;
     int err;
-    struct fuse_conn *fc;
-    struct fuse_mount *fm;

     if (!ctx->file || !ctx->rootmode_present ||
         !ctx->user_id_present || !ctx->group_id_present)
@@ -1580,42 +1566,18 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
      * Require mount to happen from the same user namespace which
      * opened /dev/fuse to prevent potential attacks.
      */
-    err = -EINVAL;
     if ((ctx->file->f_op != &fuse_dev_operations) ||
         (ctx->file->f_cred->user_ns != sb->s_user_ns))
-        goto err;
+        return -EINVAL;
     ctx->fudptr = &ctx->file->private_data;

-    fc = kmalloc(sizeof(*fc), GFP_KERNEL);
-    err = -ENOMEM;
-    if (!fc)
-        goto err;
-
-    fm = kzalloc(sizeof(*fm), GFP_KERNEL);
-    if (!fm) {
-        kfree(fc);
-        goto err;
-    }
-
-    fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
-    fc->release = fuse_free_conn;
-
-    sb->s_fs_info = fm;
-
     err = fuse_fill_super_common(sb, ctx);
     if (err)
-        goto err_put_conn;
+        return err;
     /* file->private_data shall be visible on all CPUs after this */
     smp_mb();
     fuse_send_init(get_fuse_mount_super(sb));
     return 0;
-
-err_put_conn:
-    fuse_conn_put(fc);
-    kfree(fm);
-    sb->s_fs_info = NULL;
-err:
-    return err;
 }

 /*
@@ -1637,22 +1599,40 @@ static int fuse_get_tree(struct fs_context *fsc)
 {
     struct fuse_fs_context *ctx = fsc->fs_private;
     struct fuse_dev *fud;
+    struct fuse_conn *fc;
+    struct fuse_mount *fm;
     struct super_block *sb;
     int err;

+    fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+    if (!fc)
+        return -ENOMEM;
+
+    fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+    if (!fm) {
+        kfree(fc);
+        return -ENOMEM;
+    }
+
+    fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
+    fc->release = fuse_free_conn;
+
+    fsc->s_fs_info = fm;

     if (ctx->fd_present)
         ctx->file = fget(ctx->fd);

     if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
         err = get_tree_bdev(fsc, fuse_fill_super);
-        goto out_fput;
+        goto out;
     }
     /*
      * While block dev mount can be initialized with a dummy device fd
      * (found by device name), normal fuse mounts can't
      */
+    err = -EINVAL;
     if (!ctx->file)
-        return -EINVAL;
+        goto out;

     /*
      * Allow creating a fuse mount with an already initialized fuse
@@ -1668,7 +1648,9 @@ static int fuse_get_tree(struct fs_context *fsc)
     } else {
         err = get_tree_nodev(fsc, fuse_fill_super);
     }
-out_fput:
+out:
+    if (fsc->s_fs_info)
+        fuse_mount_destroy(fm);
     if (ctx->file)
         fput(ctx->file);
     return err;
@@ -1747,17 +1729,25 @@ static void fuse_sb_destroy(struct super_block *sb)
     struct fuse_mount *fm = get_fuse_mount_super(sb);
     bool last;

-    if (fm) {
+    if (sb->s_root) {
         last = fuse_mount_remove(fm);
         if (last)
             fuse_conn_destroy(fm);
     }
 }

+void fuse_mount_destroy(struct fuse_mount *fm)
+{
+    fuse_conn_put(fm->fc);
+    kfree(fm);
+}
+EXPORT_SYMBOL(fuse_mount_destroy);
+
 static void fuse_kill_sb_anon(struct super_block *sb)
 {
     fuse_sb_destroy(sb);
     kill_anon_super(sb);
+    fuse_mount_destroy(get_fuse_mount_super(sb));
 }

 static struct file_system_type fuse_fs_type = {
@@ -1775,6 +1765,7 @@ static void fuse_kill_sb_blk(struct super_block *sb)
 {
     fuse_sb_destroy(sb);
     kill_block_super(sb);
+    fuse_mount_destroy(get_fuse_mount_super(sb));
 }

 static struct file_system_type fuseblk_fs_type = {
@@ -1394,12 +1394,13 @@ static void virtio_kill_sb(struct super_block *sb)
     bool last;

     /* If mount failed, we can still be called without any fc */
-    if (fm) {
+    if (sb->s_root) {
         last = fuse_mount_remove(fm);
         if (last)
             virtio_fs_conn_destroy(fm);
     }
     kill_anon_super(sb);
+    fuse_mount_destroy(fm);
 }

 static int virtio_fs_test_super(struct super_block *sb,
@@ -1455,19 +1456,14 @@ static int virtio_fs_get_tree(struct fs_context *fsc)

     fsc->s_fs_info = fm;
     sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
-    if (fsc->s_fs_info) {
-        fuse_conn_put(fc);
-        kfree(fm);
-    }
+    if (fsc->s_fs_info)
+        fuse_mount_destroy(fm);
     if (IS_ERR(sb))
         return PTR_ERR(sb);

     if (!sb->s_root) {
         err = virtio_fs_fill_super(sb, fsc);
         if (err) {
-            fuse_conn_put(fc);
-            kfree(fm);
-            sb->s_fs_info = NULL;
             deactivate_locked_super(sb);
             return err;
         }
@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
         pr_warn_once("io-wq is not configured for unbound workers");

     raw_spin_lock(&wqe->lock);
-    if (acct->nr_workers == acct->max_workers) {
+    if (acct->nr_workers >= acct->max_workers) {
         raw_spin_unlock(&wqe->lock);
         return true;
     }
@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)

     rcu_read_lock();
     for_each_node(node) {
+        struct io_wqe *wqe = wq->wqes[node];
         struct io_wqe_acct *acct;

+        raw_spin_lock(&wqe->lock);
         for (i = 0; i < IO_WQ_ACCT_NR; i++) {
-            acct = &wq->wqes[node]->acct[i];
+            acct = &wqe->acct[i];
             prev = max_t(int, acct->max_workers, prev);
             if (new_count[i])
                 acct->max_workers = new_count[i];
             new_count[i] = prev;
         }
+        raw_spin_unlock(&wqe->lock);
     }
     rcu_read_unlock();
     return 0;
@@ -456,6 +456,8 @@ struct io_ring_ctx {
         struct work_struct exit_work;
         struct list_head tctx_list;
         struct completion ref_comp;
+        u32 iowq_limits[2];
+        bool iowq_limits_set;
     };
 };

@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
     }
 }

-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
-    req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
     if (WARN_ON_ONCE(!req->link))
@@ -6983,7 +6980,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
     switch (io_arm_poll_handler(req)) {
     case IO_APOLL_READY:
         if (linked_timeout)
-            io_unprep_linked_timeout(req);
+            io_queue_linked_timeout(linked_timeout);
         goto issue_sqe;
     case IO_APOLL_ABORTED:
         /*
@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
         ret = io_uring_alloc_task_context(current, ctx);
         if (unlikely(ret))
             return ret;

         tctx = current->io_uring;
+        if (ctx->iowq_limits_set) {
+            unsigned int limits[2] = { ctx->iowq_limits[0],
+                                       ctx->iowq_limits[1], };
+
+            ret = io_wq_max_workers(tctx->io_wq, limits);
+            if (ret)
+                return ret;
+        }
     }
     if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
         node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)

 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                         void __user *arg)
+    __must_hold(&ctx->uring_lock)
 {
+    struct io_tctx_node *node;
     struct io_uring_task *tctx = NULL;
     struct io_sq_data *sqd = NULL;
     __u32 new_count[2];
@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
         tctx = current->io_uring;
     }

-    ret = -EINVAL;
-    if (!tctx || !tctx->io_wq)
-        goto err;
+    BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

+    memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+    ctx->iowq_limits_set = true;
+
+    ret = -EINVAL;
+    if (tctx && tctx->io_wq) {
         ret = io_wq_max_workers(tctx->io_wq, new_count);
         if (ret)
             goto err;
+    } else {
+        memset(new_count, 0, sizeof(new_count));
+    }

     if (sqd) {
         mutex_unlock(&sqd->lock);
@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
     if (copy_to_user(arg, new_count, sizeof(new_count)))
         return -EFAULT;

+    /* that's it for SQPOLL, only the SQPOLL task creates requests */
+    if (sqd)
+        return 0;
+
+    /* now propagate the restriction to all registered users */
+    list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+        struct io_uring_task *tctx = node->task->io_uring;
+
+        if (WARN_ON_ONCE(!tctx->io_wq))
+            continue;
+
+        for (i = 0; i < ARRAY_SIZE(new_count); i++)
+            new_count[i] = ctx->iowq_limits[i];
+        /* ignore errors, it always returns zero anyway */
+        (void)io_wq_max_workers(tctx->io_wq, new_count);
+    }
     return 0;
 err:
     if (sqd) {
@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
     struct fd f = fdget(fd);
     int ret = -EBADF;

-    if (!f.file)
+    if (!f.file || !(f.file->f_mode & FMODE_READ))
         goto out;

     ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
@@ -298,8 +298,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
 			int blob_len, struct ksmbd_session *sess)
 {
 	char *domain_name;
-	unsigned int lm_off, nt_off;
-	unsigned short nt_len;
+	unsigned int nt_off, dn_off;
+	unsigned short nt_len, dn_len;
 	int ret;
 
 	if (blob_len < sizeof(struct authenticate_message)) {
@@ -314,15 +314,17 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
 		return -EINVAL;
 	}
 
-	lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
 	nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
 	nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
+	dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
+	dn_len = le16_to_cpu(authblob->DomainName.Length);
+
+	if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+		return -EINVAL;
 
 	/* TODO : use domain name that imported from configuration file */
-	domain_name = smb_strndup_from_utf16((const char *)authblob +
-			le32_to_cpu(authblob->DomainName.BufferOffset),
-			le16_to_cpu(authblob->DomainName.Length), true,
-			sess->conn->local_nls);
+	domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
+					     dn_len, true, sess->conn->local_nls);
 	if (IS_ERR(domain_name))
 		return PTR_ERR(domain_name);
 
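The added dn_off/dn_len checks follow a standard pattern for validating an attacker-controlled (offset, length) pair against a buffer: do the sum in a wider type so it cannot wrap around. A standalone sketch of that check, with hypothetical names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * True if [off, off + len) lies entirely inside a blob of blob_len bytes.
 * Promoting to 64 bits before adding prevents 32-bit wraparound, which is
 * exactly what the (u64) casts in the hunk above guard against.
 */
static bool range_in_blob(uint32_t off, uint16_t len, uint32_t blob_len)
{
	return (uint64_t)off + len <= blob_len;
}

int main(void)
{
	assert(range_in_blob(16, 8, 64));
	assert(!range_in_blob(UINT32_MAX, 8, 64)); /* would wrap in 32 bits */
	return 0;
}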
@@ -61,6 +61,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 	conn->local_nls = load_nls_default();
 	atomic_set(&conn->req_running, 0);
 	atomic_set(&conn->r_count, 0);
+	conn->total_credits = 1;
+
 	init_waitqueue_head(&conn->req_running_q);
 	INIT_LIST_HEAD(&conn->conns_list);
 	INIT_LIST_HEAD(&conn->sessions);
@@ -296,10 +298,12 @@ int ksmbd_conn_handler_loop(void *p)
 		pdu_size = get_rfc1002_len(hdr_buf);
 		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
 
-		/* make sure we have enough to get to SMB header end */
-		if (!ksmbd_pdu_size_has_room(pdu_size)) {
-			ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
-				    pdu_size);
+		/*
+		 * Check if pdu size is valid (min : smb header size,
+		 * max : 0x00FFFFFF).
+		 */
+		if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+		    pdu_size > MAX_STREAM_PROT_LEN) {
 			continue;
 		}
 
@@ -211,6 +211,7 @@ struct ksmbd_tree_disconnect_request {
  */
 struct ksmbd_logout_request {
 	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+	__u32	account_flags;
 };
 
 /*
@@ -317,6 +318,7 @@ enum KSMBD_TREE_CONN_STATUS {
 #define KSMBD_USER_FLAG_BAD_UID		BIT(2)
 #define KSMBD_USER_FLAG_BAD_USER	BIT(3)
 #define KSMBD_USER_FLAG_GUEST_ACCOUNT	BIT(4)
+#define KSMBD_USER_FLAG_DELAY_SESSION	BIT(5)
 
 /*
  * Share config flags.
@@ -55,7 +55,7 @@ struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
 
 void ksmbd_free_user(struct ksmbd_user *user)
 {
-	ksmbd_ipc_logout_request(user->name);
+	ksmbd_ipc_logout_request(user->name, user->flags);
 	kfree(user->name);
 	kfree(user->passkey);
 	kfree(user);
@@ -18,6 +18,7 @@ struct ksmbd_user {
 
 	size_t passkey_sz;
 	char *passkey;
+	unsigned int failed_login_count;
 };
 
 static inline bool user_guest(struct ksmbd_user *user)
@@ -284,11 +284,13 @@ static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
 		le32_to_cpu(h->MaxOutputResponse);
 }
 
-static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
+static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
+				       struct smb2_hdr *hdr)
 {
-	int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
-	int credit_charge = le16_to_cpu(hdr->CreditCharge);
+	unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
+	unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
 	void *__hdr = hdr;
+	int ret;
 
 	switch (hdr->Command) {
 	case SMB2_QUERY_INFO:
@@ -310,21 +312,37 @@ static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
 		req_len = smb2_ioctl_req_len(__hdr);
 		expect_resp_len = smb2_ioctl_resp_len(__hdr);
 		break;
-	default:
+	case SMB2_CANCEL:
 		return 0;
+	default:
+		req_len = 1;
+		break;
 	}
 
-	credit_charge = max(1, credit_charge);
-	max_len = max(req_len, expect_resp_len);
+	credit_charge = max_t(unsigned short, credit_charge, 1);
+	max_len = max_t(unsigned int, req_len, expect_resp_len);
 	calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
 
 	if (credit_charge < calc_credit_num) {
-		pr_err("Insufficient credit charge, given: %d, needed: %d\n",
-		       credit_charge, calc_credit_num);
+		ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
+			    credit_charge, calc_credit_num);
 		return 1;
+	} else if (credit_charge > conn->max_credits) {
+		ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
+		return 1;
 	}
 
-	return 0;
+	spin_lock(&conn->credits_lock);
+	if (credit_charge <= conn->total_credits) {
+		conn->total_credits -= credit_charge;
+		ret = 0;
+	} else {
+		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+			    credit_charge, conn->total_credits);
+		ret = 1;
+	}
+	spin_unlock(&conn->credits_lock);
+	return ret;
 }
 
 int ksmbd_smb2_check_message(struct ksmbd_work *work)
@@ -382,26 +400,20 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
 		}
 	}
 
-	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
-	    smb2_validate_credit_charge(hdr)) {
-		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
-		return 1;
-	}
-
 	if (smb2_calc_size(hdr, &clc_len))
 		return 1;
 
 	if (len != clc_len) {
 		/* client can return one byte more due to implied bcc[0] */
 		if (clc_len == len + 1)
-			return 0;
+			goto validate_credit;
 
 		/*
 		 * Some windows servers (win2016) will pad also the final
 		 * PDU in a compound to 8 bytes.
 		 */
 		if (ALIGN(clc_len, 8) == len)
-			return 0;
+			goto validate_credit;
 
 		/*
 		 * windows client also pad up to 8 bytes when compounding.
@@ -414,7 +426,7 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
 			    "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
 			    len, clc_len, command,
 			    le64_to_cpu(hdr->MessageId));
-		return 0;
+		goto validate_credit;
 	}
 
 	ksmbd_debug(SMB,
@@ -425,6 +437,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
 		return 1;
 	}
 
+validate_credit:
+	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+	    smb2_validate_credit_charge(work->conn, hdr)) {
+		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -284,6 +284,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
 
 void init_smb2_max_read_size(unsigned int sz)
 {
+	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
 	smb21_server_values.max_read_size = sz;
 	smb30_server_values.max_read_size = sz;
 	smb302_server_values.max_read_size = sz;
@@ -292,6 +293,7 @@ void init_smb2_max_read_size(unsigned int sz)
 
 void init_smb2_max_write_size(unsigned int sz)
 {
+	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
 	smb21_server_values.max_write_size = sz;
 	smb30_server_values.max_write_size = sz;
 	smb302_server_values.max_write_size = sz;
@@ -300,6 +302,7 @@ void init_smb2_max_write_size(unsigned int sz)
 
 void init_smb2_max_trans_size(unsigned int sz)
 {
+	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
 	smb21_server_values.max_trans_size = sz;
 	smb30_server_values.max_trans_size = sz;
 	smb302_server_values.max_trans_size = sz;
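The clamp_val() calls above pin an administrator-supplied size into [SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE] before it is stored. A userspace equivalent of that clamping, using the constants this series introduces:

#include <stdio.h>

#define SMB3_MIN_IOSIZE (64 * 1024)
#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)

/* Behaves like the kernel's clamp_val(sz, lo, hi). */
static unsigned int clamp_iosize(unsigned int sz)
{
	if (sz < SMB3_MIN_IOSIZE)
		return SMB3_MIN_IOSIZE;
	if (sz > SMB3_MAX_IOSIZE)
		return SMB3_MAX_IOSIZE;
	return sz;
}

int main(void)
{
	printf("%u\n", clamp_iosize(1024));      /* too small -> 65536 */
	printf("%u\n", clamp_iosize(16u << 20)); /* too large -> 8388608 */
	return 0;
}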
|
@ -292,22 +292,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int smb2_consume_credit_charge(struct ksmbd_work *work,
|
|
||||||
unsigned short credit_charge)
|
|
||||||
{
|
|
||||||
struct ksmbd_conn *conn = work->conn;
|
|
||||||
unsigned int rsp_credits = 1;
|
|
||||||
|
|
||||||
if (!conn->total_credits)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (credit_charge > 0)
|
|
||||||
rsp_credits = credit_charge;
|
|
||||||
|
|
||||||
conn->total_credits -= rsp_credits;
|
|
||||||
return rsp_credits;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* smb2_set_rsp_credits() - set number of credits in response buffer
|
* smb2_set_rsp_credits() - set number of credits in response buffer
|
||||||
* @work: smb work containing smb response buffer
|
* @work: smb work containing smb response buffer
|
||||||
@ -317,49 +301,43 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
|
|||||||
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
|
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
|
||||||
struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
|
struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
|
||||||
struct ksmbd_conn *conn = work->conn;
|
struct ksmbd_conn *conn = work->conn;
|
||||||
unsigned short credits_requested = le16_to_cpu(req_hdr->CreditRequest);
|
unsigned short credits_requested;
|
||||||
unsigned short credit_charge = 1, credits_granted = 0;
|
unsigned short credit_charge, credits_granted = 0;
|
||||||
unsigned short aux_max, aux_credits, min_credits;
|
unsigned short aux_max, aux_credits;
|
||||||
int rsp_credit_charge;
|
|
||||||
|
|
||||||
if (hdr->Command == SMB2_CANCEL)
|
if (work->send_no_response)
|
||||||
goto out;
|
return 0;
|
||||||
|
|
||||||
/* get default minimum credits by shifting maximum credits by 4 */
|
hdr->CreditCharge = req_hdr->CreditCharge;
|
||||||
min_credits = conn->max_credits >> 4;
|
|
||||||
|
|
||||||
if (conn->total_credits >= conn->max_credits) {
|
if (conn->total_credits > conn->max_credits) {
|
||||||
|
hdr->CreditRequest = 0;
|
||||||
pr_err("Total credits overflow: %d\n", conn->total_credits);
|
pr_err("Total credits overflow: %d\n", conn->total_credits);
|
||||||
conn->total_credits = min_credits;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
rsp_credit_charge =
|
credit_charge = max_t(unsigned short,
|
||||||
smb2_consume_credit_charge(work, le16_to_cpu(req_hdr->CreditCharge));
|
le16_to_cpu(req_hdr->CreditCharge), 1);
|
||||||
if (rsp_credit_charge < 0)
|
credits_requested = max_t(unsigned short,
|
||||||
return -EINVAL;
|
le16_to_cpu(req_hdr->CreditRequest), 1);
|
||||||
|
|
||||||
hdr->CreditCharge = cpu_to_le16(rsp_credit_charge);
|
/* according to smb2.credits smbtorture, Windows server
|
||||||
|
* 2016 or later grant up to 8192 credits at once.
|
||||||
if (credits_requested > 0) {
|
*
|
||||||
aux_credits = credits_requested - 1;
|
|
||||||
aux_max = 32;
|
|
||||||
if (hdr->Command == SMB2_NEGOTIATE)
|
|
||||||
aux_max = 0;
|
|
||||||
aux_credits = (aux_credits < aux_max) ? aux_credits : aux_max;
|
|
||||||
credits_granted = aux_credits + credit_charge;
|
|
||||||
|
|
||||||
/* if credits granted per client is getting bigger than default
|
|
||||||
* minimum credits then we should wrap it up within the limits.
|
|
||||||
*/
|
|
||||||
if ((conn->total_credits + credits_granted) > min_credits)
|
|
||||||
credits_granted = min_credits - conn->total_credits;
|
|
||||||
/*
|
|
||||||
* TODO: Need to adjuct CreditRequest value according to
|
* TODO: Need to adjuct CreditRequest value according to
|
||||||
* current cpu load
|
* current cpu load
|
||||||
*/
|
*/
|
||||||
} else if (conn->total_credits == 0) {
|
aux_credits = credits_requested - 1;
|
||||||
credits_granted = 1;
|
if (hdr->Command == SMB2_NEGOTIATE)
|
||||||
}
|
aux_max = 0;
|
||||||
|
else
|
||||||
|
aux_max = conn->max_credits - credit_charge;
|
||||||
|
aux_credits = min_t(unsigned short, aux_credits, aux_max);
|
||||||
|
credits_granted = credit_charge + aux_credits;
|
||||||
|
|
||||||
|
if (conn->max_credits - conn->total_credits < credits_granted)
|
||||||
|
credits_granted = conn->max_credits -
|
||||||
|
conn->total_credits;
|
||||||
|
|
||||||
conn->total_credits += credits_granted;
|
conn->total_credits += credits_granted;
|
||||||
work->credits_granted += credits_granted;
|
work->credits_granted += credits_granted;
|
||||||
@ -368,7 +346,6 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
|
|||||||
/* Update CreditRequest in last request */
|
/* Update CreditRequest in last request */
|
||||||
hdr->CreditRequest = cpu_to_le16(work->credits_granted);
|
hdr->CreditRequest = cpu_to_le16(work->credits_granted);
|
||||||
}
|
}
|
||||||
out:
|
|
||||||
ksmbd_debug(SMB,
|
ksmbd_debug(SMB,
|
||||||
"credits: requested[%d] granted[%d] total_granted[%d]\n",
|
"credits: requested[%d] granted[%d] total_granted[%d]\n",
|
||||||
credits_requested, credits_granted,
|
credits_requested, credits_granted,
|
||||||
@ -472,6 +449,12 @@ bool is_chained_smb2_message(struct ksmbd_work *work)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
|
||||||
|
work->response_sz) {
|
||||||
|
pr_err("next response offset exceeds response buffer size\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
ksmbd_debug(SMB, "got SMB2 chained command\n");
|
ksmbd_debug(SMB, "got SMB2 chained command\n");
|
||||||
init_chained_smb2_rsp(work);
|
init_chained_smb2_rsp(work);
|
||||||
return true;
|
return true;
|
||||||
@ -541,7 +524,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
|
|||||||
{
|
{
|
||||||
struct smb2_hdr *hdr = work->request_buf;
|
struct smb2_hdr *hdr = work->request_buf;
|
||||||
size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
|
size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
|
||||||
size_t large_sz = work->conn->vals->max_trans_size + MAX_SMB2_HDR_SIZE;
|
size_t large_sz = small_sz + work->conn->vals->max_trans_size;
|
||||||
size_t sz = small_sz;
|
size_t sz = small_sz;
|
||||||
int cmd = le16_to_cpu(hdr->Command);
|
int cmd = le16_to_cpu(hdr->Command);
|
||||||
|
|
||||||
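The rewritten smb2_set_rsp_credits() grants the request's credit charge plus min(requested - 1, max_credits - charge) auxiliary credits, capped so the connection's outstanding total never exceeds max_credits. A simplified standalone model of that arithmetic (not the full ksmbd logic; it assumes charge and the running total never exceed max_credits):

#include <stdio.h>

static unsigned int grant_credits(unsigned int requested, unsigned int charge,
				  unsigned int *total, unsigned int max_credits)
{
	unsigned int aux, aux_max, granted;

	if (charge < 1)
		charge = 1;
	if (requested < 1)
		requested = 1;

	aux = requested - 1;
	aux_max = max_credits - charge; /* assumes charge <= max_credits */
	if (aux > aux_max)
		aux = aux_max;
	granted = charge + aux;

	/* never exceed the per-connection budget */
	if (max_credits - *total < granted)
		granted = max_credits - *total;

	*total += granted;
	return granted;
}

int main(void)
{
	unsigned int total = 1; /* conn->total_credits starts at 1 */

	/* a greedy request is clipped to the remaining budget: 8191 */
	printf("%u\n", grant_credits(8192, 1, &total, 8192));
	return 0;
}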
@@ -1274,19 +1257,13 @@ static int generate_preauth_hash(struct ksmbd_work *work)
 	return 0;
 }
 
-static int decode_negotiation_token(struct ksmbd_work *work,
-				    struct negotiate_message *negblob)
+static int decode_negotiation_token(struct ksmbd_conn *conn,
+				    struct negotiate_message *negblob,
+				    size_t sz)
 {
-	struct ksmbd_conn *conn = work->conn;
-	struct smb2_sess_setup_req *req;
-	int sz;
-
 	if (!conn->use_spnego)
 		return -EINVAL;
 
-	req = work->request_buf;
-	sz = le16_to_cpu(req->SecurityBufferLength);
-
 	if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
 		if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
 			conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
@@ -1298,9 +1275,9 @@ static int decode_negotiation_token(struct ksmbd_work *work,
 }
 
 static int ntlm_negotiate(struct ksmbd_work *work,
-			  struct negotiate_message *negblob)
+			  struct negotiate_message *negblob,
+			  size_t negblob_len)
 {
-	struct smb2_sess_setup_req *req = work->request_buf;
 	struct smb2_sess_setup_rsp *rsp = work->response_buf;
 	struct challenge_message *chgblob;
 	unsigned char *spnego_blob = NULL;
@@ -1309,8 +1286,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
 	int sz, rc;
 
 	ksmbd_debug(SMB, "negotiate phase\n");
-	sz = le16_to_cpu(req->SecurityBufferLength);
-	rc = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, work->sess);
+	rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->sess);
 	if (rc)
 		return rc;
 
@@ -1378,12 +1354,23 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
 	struct authenticate_message *authblob;
 	struct ksmbd_user *user;
 	char *name;
-	int sz;
+	unsigned int auth_msg_len, name_off, name_len, secbuf_len;
 
+	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+	if (secbuf_len < sizeof(struct authenticate_message)) {
+		ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+		return NULL;
+	}
 	authblob = user_authblob(conn, req);
-	sz = le32_to_cpu(authblob->UserName.BufferOffset);
-	name = smb_strndup_from_utf16((const char *)authblob + sz,
-				      le16_to_cpu(authblob->UserName.Length),
+	name_off = le32_to_cpu(authblob->UserName.BufferOffset);
+	name_len = le16_to_cpu(authblob->UserName.Length);
+	auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
+
+	if (auth_msg_len < (u64)name_off + name_len)
+		return NULL;
+
+	name = smb_strndup_from_utf16((const char *)authblob + name_off,
+				      name_len,
 				      true,
 				      conn->local_nls);
 	if (IS_ERR(name)) {
@@ -1629,6 +1616,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
 	struct smb2_sess_setup_rsp *rsp = work->response_buf;
 	struct ksmbd_session *sess;
 	struct negotiate_message *negblob;
+	unsigned int negblob_len, negblob_off;
 	int rc = 0;
 
 	ksmbd_debug(SMB, "Received request for session setup\n");
@@ -1709,10 +1697,16 @@ int smb2_sess_setup(struct ksmbd_work *work)
 	if (sess->state == SMB2_SESSION_EXPIRED)
 		sess->state = SMB2_SESSION_IN_PROGRESS;
 
-	negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
-			le16_to_cpu(req->SecurityBufferOffset));
+	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+	negblob_len = le16_to_cpu(req->SecurityBufferLength);
+	if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
+	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
+		return -EINVAL;
+
+	negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
+			negblob_off);
 
-	if (decode_negotiation_token(work, negblob) == 0) {
+	if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
 		if (conn->mechToken)
 			negblob = (struct negotiate_message *)conn->mechToken;
 	}
@@ -1736,7 +1730,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
 			sess->Preauth_HashValue = NULL;
 	} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
 		if (negblob->MessageType == NtLmNegotiate) {
-			rc = ntlm_negotiate(work, negblob);
+			rc = ntlm_negotiate(work, negblob, negblob_len);
 			if (rc)
 				goto out_err;
 			rsp->hdr.Status =
@@ -1796,9 +1790,30 @@ int smb2_sess_setup(struct ksmbd_work *work)
 		conn->mechToken = NULL;
 	}
 
-	if (rc < 0 && sess) {
+	if (rc < 0) {
+		/*
+		 * SecurityBufferOffset should be set to zero
+		 * in session setup error response.
+		 */
+		rsp->SecurityBufferOffset = 0;
+
+		if (sess) {
+			bool try_delay = false;
+
+			/*
+			 * To avoid dictionary attacks (repeated session setups rapidly sent) to
+			 * connect to server, ksmbd make a delay of a 5 seconds on session setup
+			 * failure to make it harder to send enough random connection requests
+			 * to break into a server.
+			 */
+			if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+				try_delay = true;
+
 			ksmbd_session_destroy(sess);
 			work->sess = NULL;
+			if (try_delay)
+				ssleep(5);
+		}
 	}
 
 	return rc;
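The dictionary-attack comment above is the rationale for the new KSMBD_USER_FLAG_DELAY_SESSION flag: once an account is marked as repeatedly failing, every further failed session setup sleeps before the error reply goes out, throttling brute-force attempts. A compilable sketch of that throttle under assumed, illustrative thresholds (ksmbd tracks the real count in user space):

#include <stdbool.h>
#include <unistd.h>

struct user_state {
	unsigned int failed_login_count;
	bool delay_session;
};

static void note_login_failure(struct user_state *u)
{
	/* the threshold here is illustrative; the flag is what matters */
	if (++u->failed_login_count >= 10)
		u->delay_session = true;
}

static void send_session_setup_error(const struct user_state *u)
{
	if (u && u->delay_session)
		sleep(5); /* mirrors the ssleep(5) in the hunk above */
	/* ... build and send the error response ... */
}

int main(void)
{
	struct user_state u = { 0, false };

	for (int i = 0; i < 10; i++)
		note_login_failure(&u);
	send_session_setup_error(&u); /* now delayed by 5 seconds */
	return 0;
}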
@@ -3779,6 +3794,24 @@ static int verify_info_level(int info_level)
 	return 0;
 }
 
+static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
+				     unsigned short hdr2_len,
+				     unsigned int out_buf_len)
+{
+	int free_len;
+
+	if (out_buf_len > work->conn->vals->max_trans_size)
+		return -EINVAL;
+
+	free_len = (int)(work->response_sz -
+			 (get_rfc1002_len(work->response_buf) + 4)) -
+		   hdr2_len;
+	if (free_len < 0)
+		return -EINVAL;
+
+	return min_t(int, out_buf_len, free_len);
+}
+
 int smb2_query_dir(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
@@ -3855,9 +3888,13 @@ int smb2_query_dir(struct ksmbd_work *work)
 	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
 	d_info.wptr = (char *)rsp->Buffer;
 	d_info.rptr = (char *)rsp->Buffer;
-	d_info.out_buf_len = (work->response_sz - (get_rfc1002_len(rsp_org) + 4));
-	d_info.out_buf_len = min_t(int, d_info.out_buf_len, le32_to_cpu(req->OutputBufferLength)) -
-		sizeof(struct smb2_query_directory_rsp);
+	d_info.out_buf_len =
+		smb2_calc_max_out_buf_len(work, 8,
+					  le32_to_cpu(req->OutputBufferLength));
+	if (d_info.out_buf_len < 0) {
+		rc = -EINVAL;
+		goto err_out;
+	}
 	d_info.flags = srch_flag;
 
 	/*
@@ -4091,12 +4128,11 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
 			le32_to_cpu(req->Flags));
 	}
 
-	buf_free_len = work->response_sz -
-			(get_rfc1002_len(rsp_org) + 4) -
-			sizeof(struct smb2_query_info_rsp);
-
-	if (le32_to_cpu(req->OutputBufferLength) < buf_free_len)
-		buf_free_len = le32_to_cpu(req->OutputBufferLength);
+	buf_free_len =
+		smb2_calc_max_out_buf_len(work, 8,
+					  le32_to_cpu(req->OutputBufferLength));
+	if (buf_free_len < 0)
+		return -EINVAL;
 
 	rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
 	if (rc < 0) {
@@ -4407,6 +4443,8 @@ static void get_file_stream_info(struct ksmbd_work *work,
 	struct path *path = &fp->filp->f_path;
 	ssize_t xattr_list_len;
 	int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+	int buf_free_len;
+	struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
 
 	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
 			 &stat);
@@ -4420,6 +4458,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
 		goto out;
 	}
 
+	buf_free_len =
+		smb2_calc_max_out_buf_len(work, 8,
+					  le32_to_cpu(req->OutputBufferLength));
+	if (buf_free_len < 0)
+		goto out;
+
 	while (idx < xattr_list_len) {
 		stream_name = xattr_list + idx;
 		streamlen = strlen(stream_name);
@@ -4444,6 +4488,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
 		streamlen = snprintf(stream_buf, streamlen + 1,
 				     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
 
+		next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
+		if (next > buf_free_len)
+			break;
+
 		file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
 		streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
 					      stream_buf, streamlen,
@@ -4454,12 +4502,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
 		file_info->StreamSize = cpu_to_le64(stream_name_len);
 		file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
 
-		next = sizeof(struct smb2_file_stream_info) + streamlen;
 		nbytes += next;
+		buf_free_len -= next;
 		file_info->NextEntryOffset = cpu_to_le32(next);
 	}
 
-	if (!S_ISDIR(stat.mode)) {
+	if (!S_ISDIR(stat.mode) &&
+	    buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
 		file_info = (struct smb2_file_stream_info *)
 				&rsp->Buffer[nbytes];
 		streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -6220,8 +6269,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
 	    (offsetof(struct smb2_write_req, Buffer) - 4)) {
 		data_buf = (char *)&req->Buffer[0];
 	} else {
-		if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-		    (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+		if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
 			pr_err("invalid write data offset %u, smb_len %u\n",
 			       le16_to_cpu(req->DataOffset),
 			       get_rfc1002_len(req));
@@ -6379,8 +6427,7 @@ int smb2_write(struct ksmbd_work *work)
 	    (offsetof(struct smb2_write_req, Buffer) - 4)) {
 		data_buf = (char *)&req->Buffer[0];
 	} else {
-		if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-		    (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+		if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
 			pr_err("invalid write data offset %u, smb_len %u\n",
 			       le16_to_cpu(req->DataOffset),
 			       get_rfc1002_len(req));
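smb2_calc_max_out_buf_len() above caps the client's OutputBufferLength by both the server's maximum transaction size and the space actually left in the response buffer. The same computation in a standalone form (hypothetical parameters; the kernel reads them from the work/conn structures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the usable output length, or -1 if the request cannot fit. */
static int calc_max_out_buf_len(size_t response_sz, size_t used,
				unsigned short hdr2_len,
				uint32_t out_buf_len,
				uint32_t max_trans_size)
{
	int64_t free_len;

	if (out_buf_len > max_trans_size)
		return -1;

	free_len = (int64_t)response_sz - (int64_t)used - hdr2_len;
	if (free_len < 0)
		return -1;

	return out_buf_len < (uint64_t)free_len ? (int)out_buf_len
						: (int)free_len;
}

int main(void)
{
	/* 64 KiB buffer, 1 KiB already written, 8-byte response header:
	 * a 1 MiB request is clipped to the 64504 bytes that remain. */
	printf("%d\n", calc_max_out_buf_len(65536, 1024, 8, 1u << 20, 1u << 20));
	return 0;
}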
@@ -7023,24 +7070,26 @@ int smb2_lock(struct ksmbd_work *work)
 	return err;
 }
 
-static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
+static int fsctl_copychunk(struct ksmbd_work *work,
+			   struct copychunk_ioctl_req *ci_req,
+			   unsigned int cnt_code,
+			   unsigned int input_count,
+			   unsigned long long volatile_id,
+			   unsigned long long persistent_id,
 			   struct smb2_ioctl_rsp *rsp)
 {
-	struct copychunk_ioctl_req *ci_req;
 	struct copychunk_ioctl_rsp *ci_rsp;
 	struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
 	struct srv_copychunk *chunks;
 	unsigned int i, chunk_count, chunk_count_written = 0;
 	unsigned int chunk_size_written = 0;
 	loff_t total_size_written = 0;
-	int ret, cnt_code;
+	int ret = 0;
 
-	cnt_code = le32_to_cpu(req->CntCode);
-	ci_req = (struct copychunk_ioctl_req *)&req->Buffer[0];
 	ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
 
-	rsp->VolatileFileId = req->VolatileFileId;
-	rsp->PersistentFileId = req->PersistentFileId;
+	rsp->VolatileFileId = cpu_to_le64(volatile_id);
+	rsp->PersistentFileId = cpu_to_le64(persistent_id);
 	ci_rsp->ChunksWritten =
 		cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
 	ci_rsp->ChunkBytesWritten =
@@ -7050,12 +7099,13 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
 	chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
 	chunk_count = le32_to_cpu(ci_req->ChunkCount);
+	if (chunk_count == 0)
+		goto out;
 	total_size_written = 0;
 
 	/* verify the SRV_COPYCHUNK_COPY packet */
 	if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
-	    le32_to_cpu(req->InputCount) <
-	     offsetof(struct copychunk_ioctl_req, Chunks) +
+	    input_count < offsetof(struct copychunk_ioctl_req, Chunks) +
 	     chunk_count * sizeof(struct srv_copychunk)) {
 		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
 		return -EINVAL;
@@ -7076,9 +7126,7 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
 	src_fp = ksmbd_lookup_foreign_fd(work,
 					 le64_to_cpu(ci_req->ResumeKey[0]));
-	dst_fp = ksmbd_lookup_fd_slow(work,
-				      le64_to_cpu(req->VolatileFileId),
-				      le64_to_cpu(req->PersistentFileId));
+	dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
 	ret = -EINVAL;
 	if (!src_fp ||
 	    src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
@@ -7153,8 +7201,8 @@ static __be32 idev_ipv4_address(struct in_device *idev)
 }
 
 static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
-					struct smb2_ioctl_req *req,
-					struct smb2_ioctl_rsp *rsp)
+					struct smb2_ioctl_rsp *rsp,
+					unsigned int out_buf_len)
 {
 	struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
 	int nbytes = 0;
@@ -7166,6 +7214,12 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
 	rtnl_lock();
 	for_each_netdev(&init_net, netdev) {
+		if (out_buf_len <
+		    nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
+			rtnl_unlock();
+			return -ENOSPC;
+		}
+
 		if (netdev->type == ARPHRD_LOOPBACK)
 			continue;
 
@@ -7245,11 +7299,6 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 	if (nii_rsp)
 		nii_rsp->Next = 0;
 
-	if (!nbytes) {
-		rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
-		return -EINVAL;
-	}
-
 	rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
 	rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
 	return nbytes;
@@ -7257,11 +7306,16 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
 static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
 					 struct validate_negotiate_info_req *neg_req,
-					 struct validate_negotiate_info_rsp *neg_rsp)
+					 struct validate_negotiate_info_rsp *neg_rsp,
+					 unsigned int in_buf_len)
 {
 	int ret = 0;
 	int dialect;
 
+	if (in_buf_len < sizeof(struct validate_negotiate_info_req) +
+			le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
+		return -EINVAL;
+
 	dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
 					     neg_req->DialectCount);
 	if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
@@ -7295,7 +7349,7 @@ static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
 static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
 					struct file_allocated_range_buffer *qar_req,
 					struct file_allocated_range_buffer *qar_rsp,
-					int in_count, int *out_count)
+					unsigned int in_count, unsigned int *out_count)
 {
 	struct ksmbd_file *fp;
 	loff_t start, length;
@@ -7322,7 +7376,8 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
 }
 
 static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
-				 int out_buf_len, struct smb2_ioctl_req *req,
+				 unsigned int out_buf_len,
+				 struct smb2_ioctl_req *req,
 				 struct smb2_ioctl_rsp *rsp)
 {
 	struct ksmbd_rpc_command *rpc_resp;
@@ -7436,8 +7491,7 @@ int smb2_ioctl(struct ksmbd_work *work)
 {
 	struct smb2_ioctl_req *req;
 	struct smb2_ioctl_rsp *rsp, *rsp_org;
-	int cnt_code, nbytes = 0;
-	int out_buf_len;
+	unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
 	u64 id = KSMBD_NO_FID;
 	struct ksmbd_conn *conn = work->conn;
 	int ret = 0;
@@ -7465,8 +7519,14 @@ int smb2_ioctl(struct ksmbd_work *work)
 	}
 
 	cnt_code = le32_to_cpu(req->CntCode);
-	out_buf_len = le32_to_cpu(req->MaxOutputResponse);
-	out_buf_len = min(KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
+	ret = smb2_calc_max_out_buf_len(work, 48,
+					le32_to_cpu(req->MaxOutputResponse));
+	if (ret < 0) {
+		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+	out_buf_len = (unsigned int)ret;
+	in_buf_len = le32_to_cpu(req->InputCount);
 
 	switch (cnt_code) {
 	case FSCTL_DFS_GET_REFERRALS:
@@ -7494,6 +7554,7 @@ int smb2_ioctl(struct ksmbd_work *work)
 		break;
 	}
 	case FSCTL_PIPE_TRANSCEIVE:
+		out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
 		nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
 		break;
 	case FSCTL_VALIDATE_NEGOTIATE_INFO:
@@ -7502,9 +7563,16 @@ int smb2_ioctl(struct ksmbd_work *work)
 			goto out;
 		}
 
+		if (in_buf_len < sizeof(struct validate_negotiate_info_req))
+			return -EINVAL;
+
+		if (out_buf_len < sizeof(struct validate_negotiate_info_rsp))
+			return -EINVAL;
+
 		ret = fsctl_validate_negotiate_info(conn,
 			(struct validate_negotiate_info_req *)&req->Buffer[0],
-			(struct validate_negotiate_info_rsp *)&rsp->Buffer[0]);
+			(struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+			in_buf_len);
 		if (ret < 0)
 			goto out;
 
@@ -7513,9 +7581,10 @@ int smb2_ioctl(struct ksmbd_work *work)
 		rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
 		break;
 	case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
-		nbytes = fsctl_query_iface_info_ioctl(conn, req, rsp);
-		if (nbytes < 0)
+		ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
+		if (ret < 0)
 			goto out;
+		nbytes = ret;
 		break;
 	case FSCTL_REQUEST_RESUME_KEY:
 		if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
@@ -7540,15 +7609,33 @@ int smb2_ioctl(struct ksmbd_work *work)
 			goto out;
 		}
 
+		if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
 			ret = -EINVAL;
 			goto out;
 		}
 
 		nbytes = sizeof(struct copychunk_ioctl_rsp);
-		fsctl_copychunk(work, req, rsp);
+		rsp->VolatileFileId = req->VolatileFileId;
+		rsp->PersistentFileId = req->PersistentFileId;
+		fsctl_copychunk(work,
+				(struct copychunk_ioctl_req *)&req->Buffer[0],
+				le32_to_cpu(req->CntCode),
+				le32_to_cpu(req->InputCount),
+				le64_to_cpu(req->VolatileFileId),
+				le64_to_cpu(req->PersistentFileId),
+				rsp);
 		break;
 	case FSCTL_SET_SPARSE:
+		if (in_buf_len < sizeof(struct file_sparse)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		ret = fsctl_set_sparse(work, id,
 				       (struct file_sparse *)&req->Buffer[0]);
 		if (ret < 0)
@@ -7567,6 +7654,11 @@ int smb2_ioctl(struct ksmbd_work *work)
 			goto out;
 		}
 
+		if (in_buf_len < sizeof(struct file_zero_data_information)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		zero_data =
 			(struct file_zero_data_information *)&req->Buffer[0];
 
@@ -7586,6 +7678,11 @@ int smb2_ioctl(struct ksmbd_work *work)
 		break;
 	}
 	case FSCTL_QUERY_ALLOCATED_RANGES:
+		if (in_buf_len < sizeof(struct file_allocated_range_buffer)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		ret = fsctl_query_allocated_ranges(work, id,
 			(struct file_allocated_range_buffer *)&req->Buffer[0],
 			(struct file_allocated_range_buffer *)&rsp->Buffer[0],
@@ -7626,6 +7723,11 @@ int smb2_ioctl(struct ksmbd_work *work)
 		struct duplicate_extents_to_file *dup_ext;
 		loff_t src_off, dst_off, length, cloned;
 
+		if (in_buf_len < sizeof(struct duplicate_extents_to_file)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
 
 		fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
@@ -7696,6 +7798,8 @@ int smb2_ioctl(struct ksmbd_work *work)
 		rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
 	else if (ret == -EOPNOTSUPP)
 		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+	else if (ret == -ENOSPC)
+		rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
 	else if (ret < 0 || rsp->hdr.Status == 0)
 		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
 	smb2_set_err_rsp(work);
@@ -113,6 +113,8 @@
 #define SMB21_DEFAULT_IOSIZE	(1024 * 1024)
 #define SMB3_DEFAULT_IOSIZE	(4 * 1024 * 1024)
 #define SMB3_DEFAULT_TRANS_SIZE	(1024 * 1024)
+#define SMB3_MIN_IOSIZE	(64 * 1024)
+#define SMB3_MAX_IOSIZE	(8 * 1024 * 1024)
 
 /*
  * SMB2 Header Definition
@@ -601,7 +601,7 @@ int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
 	return ret;
 }
 
-int ksmbd_ipc_logout_request(const char *account)
+int ksmbd_ipc_logout_request(const char *account, int flags)
 {
 	struct ksmbd_ipc_msg *msg;
 	struct ksmbd_logout_request *req;
@@ -616,6 +616,7 @@ int ksmbd_ipc_logout_request(const char *account)
 
 	msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
 	req = (struct ksmbd_logout_request *)msg->payload;
+	req->account_flags = flags;
 	strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
 
 	ret = ipc_msg_send(msg);
@@ -25,7 +25,7 @@ ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
 				struct sockaddr *peer_addr);
 int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
 				      unsigned long long connect_id);
-int ksmbd_ipc_logout_request(const char *account);
+int ksmbd_ipc_logout_request(const char *account, int flags);
 struct ksmbd_share_config_response *
 ksmbd_ipc_share_config_request(const char *name);
 struct ksmbd_spnego_authen_response *
@@ -549,6 +549,10 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	switch (recvmsg->type) {
 	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+			put_empty_recvmsg(t, recvmsg);
+			return;
+		}
 		t->negotiation_requested = true;
 		t->full_packet_received = true;
 		wake_up_interruptible(&t->wait_status);
@@ -556,10 +560,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	case SMB_DIRECT_MSG_DATA_TRANSFER: {
 		struct smb_direct_data_transfer *data_transfer =
 			(struct smb_direct_data_transfer *)recvmsg->packet;
-		int data_length = le32_to_cpu(data_transfer->data_length);
+		unsigned int data_length;
 		int avail_recvmsg_count, receive_credits;
 
+		if (wc->byte_len <
+		    offsetof(struct smb_direct_data_transfer, padding)) {
+			put_empty_recvmsg(t, recvmsg);
+			return;
+		}
+
+		data_length = le32_to_cpu(data_transfer->data_length);
 		if (data_length) {
+			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+			    (u64)data_length) {
+				put_empty_recvmsg(t, recvmsg);
+				return;
+			}
+
 			if (t->full_packet_received)
 				recvmsg->first_segment = true;
 
@@ -568,7 +585,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 			else
 				t->full_packet_received = true;
 
-			enqueue_reassembly(t, recvmsg, data_length);
+			enqueue_reassembly(t, recvmsg, (int)data_length);
 			wake_up_interruptible(&t->wait_reassembly_queue);
 
 			spin_lock(&t->receive_credit_lock);
@@ -1023,7 +1023,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
 			 struct file_allocated_range_buffer *ranges,
-			 int in_count, int *out_count)
+			 unsigned int in_count, unsigned int *out_count)
 {
 	struct file *f = fp->filp;
 	struct inode *inode = file_inode(fp->filp);
@@ -166,7 +166,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 struct file_allocated_range_buffer;
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
 			 struct file_allocated_range_buffer *ranges,
-			 int in_count, int *out_count);
+			 unsigned int in_count, unsigned int *out_count);
 int ksmbd_vfs_unlink(struct user_namespace *user_ns,
 		     struct dentry *dir, struct dentry *dentry);
 void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
@ -7045,7 +7045,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
|
|||||||
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
||||||
struct buffer_head *di_bh)
|
struct buffer_head *di_bh)
|
||||||
{
|
{
|
||||||
int ret, i, has_data, num_pages = 0;
|
int ret, has_data, num_pages = 0;
|
||||||
int need_free = 0;
|
int need_free = 0;
|
||||||
u32 bit_off, num;
|
u32 bit_off, num;
|
||||||
handle_t *handle;
|
handle_t *handle;
|
||||||
@ -7054,26 +7054,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
||||||
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
||||||
struct ocfs2_alloc_context *data_ac = NULL;
|
struct ocfs2_alloc_context *data_ac = NULL;
|
||||||
struct page **pages = NULL;
|
struct page *page = NULL;
|
||||||
loff_t end = osb->s_clustersize;
|
|
||||||
struct ocfs2_extent_tree et;
|
struct ocfs2_extent_tree et;
|
||||||
int did_quota = 0;
|
int did_quota = 0;
|
||||||
|
|
||||||
has_data = i_size_read(inode) ? 1 : 0;
|
has_data = i_size_read(inode) ? 1 : 0;
|
||||||
|
|
||||||
if (has_data) {
|
if (has_data) {
|
||||||
pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
|
|
||||||
sizeof(struct page *), GFP_NOFS);
|
|
||||||
if (pages == NULL) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
mlog_errno(ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
|
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
goto free_pages;
|
goto out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -7093,7 +7084,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (has_data) {
|
if (has_data) {
|
||||||
unsigned int page_end;
|
unsigned int page_end = min_t(unsigned, PAGE_SIZE,
|
||||||
|
osb->s_clustersize);
|
||||||
u64 phys;
|
u64 phys;
|
||||||
|
|
||||||
ret = dquot_alloc_space_nodirty(inode,
|
ret = dquot_alloc_space_nodirty(inode,
|
||||||
@ -7117,15 +7109,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
*/
|
*/
|
||||||
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
|
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
|
||||||
|
|
||||||
/*
|
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
|
||||||
* Non sparse file systems zero on extend, so no need
|
&num_pages);
|
||||||
* to do that now.
|
|
||||||
*/
|
|
||||||
if (!ocfs2_sparse_alloc(osb) &&
|
|
||||||
PAGE_SIZE < osb->s_clustersize)
|
|
||||||
end = PAGE_SIZE;
|
|
||||||
|
|
||||||
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
need_free = 1;
|
need_free = 1;
|
||||||
@@ -7136,20 +7121,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 		 * This should populate the 1st page for us and mark
 		 * it up to date.
 		 */
-		ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+		ret = ocfs2_read_inline_data(inode, page, di_bh);
 		if (ret) {
 			mlog_errno(ret);
 			need_free = 1;
 			goto out_unlock;
 		}
 
-		page_end = PAGE_SIZE;
-		if (PAGE_SIZE > osb->s_clustersize)
-			page_end = osb->s_clustersize;
-
-		for (i = 0; i < num_pages; i++)
-			ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
-						 pages[i], i > 0, &phys);
+		ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
+					 &phys);
 	}
 
 	spin_lock(&oi->ip_lock);
@@ -7180,8 +7160,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 	}
 
 out_unlock:
-	if (pages)
-		ocfs2_unlock_and_free_pages(pages, num_pages);
+	if (page)
+		ocfs2_unlock_and_free_pages(&page, num_pages);
 
 out_commit:
 	if (ret < 0 && did_quota)
@@ -7205,8 +7185,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 out:
 	if (data_ac)
 		ocfs2_free_alloc_context(data_ac);
-free_pages:
-	kfree(pages);
 	return ret;
 }
 
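Note on the conversion hunks above: inline data lives in the inode block and is smaller than one page, so only the first page of the cluster ever needs to be populated. The rewrite therefore drops the pages[] array, its kcalloc() and the free_pages error label, and clamps the write-out span once with min_t(). A minimal userspace sketch (constant name hypothetical) showing that the min_t() initializer computes the same bound as the removed conditional:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u	/* stands in for PAGE_SIZE */

/* Old shape: default to PAGE_SIZE, clamp down when the cluster is smaller. */
static unsigned int bound_old(unsigned int clustersize)
{
	unsigned int page_end = DEMO_PAGE_SIZE;

	if (DEMO_PAGE_SIZE > clustersize)
		page_end = clustersize;
	return page_end;
}

/* New shape: one expression, equivalent to min_t(unsigned, PAGE_SIZE, cs). */
static unsigned int bound_new(unsigned int clustersize)
{
	return DEMO_PAGE_SIZE < clustersize ? DEMO_PAGE_SIZE : clustersize;
}

int main(void)
{
	unsigned int cs[] = { 1024, 4096, 1048576 };

	for (int i = 0; i < 3; i++)
		printf("clustersize %7u -> old %4u, new %4u\n",
		       cs[i], bound_old(cs[i]), bound_new(cs[i]));
	return 0;
}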
@@ -1251,7 +1251,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
 {
 	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
 	struct journal_head *jh;
-	int ret;
+	int ret = 1;
 
 	if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
 		return 0;
@@ -1259,6 +1259,8 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
 	if (!buffer_jbd(bg_bh))
 		return 1;
 
+	jbd_lock_bh_journal_head(bg_bh);
+	if (buffer_jbd(bg_bh)) {
 	jh = bh2jh(bg_bh);
 	spin_lock(&jh->b_state_lock);
 	bg = (struct ocfs2_group_desc *) jh->b_committed_data;
@@ -1267,6 +1269,8 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
 	else
 		ret = 1;
 	spin_unlock(&jh->b_state_lock);
+	}
+	jbd_unlock_bh_journal_head(bg_bh);
 
 	return ret;
 }
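The two hunks above close a race in ocfs2_test_bg_bit_allocatable(): the journal head attached to bg_bh can be freed between the unlocked buffer_jbd() test and the dereference, so the test is repeated under jbd_lock_bh_journal_head(), with ret defaulting to 1 when the journal head is already gone. A minimal pthread sketch of the same check-then-recheck-under-lock pattern (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_state;	/* stands in for the buffer's journal head */

static bool read_state(int *out)
{
	bool ret = false;

	if (!shared_state)	/* unlocked fast path: only a hint */
		return false;

	pthread_mutex_lock(&lock);
	if (shared_state) {	/* re-check: may have been torn down */
		*out = *shared_state;
		ret = true;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}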
@@ -2167,11 +2167,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
 	}
 
 	if (ocfs2_clusterinfo_valid(osb)) {
+		/*
+		 * ci_stack and ci_cluster in ocfs2_cluster_info may not be null
+		 * terminated, so make sure no overflow happens here by using
+		 * memcpy. Destination strings will always be null terminated
+		 * because osb is allocated using kzalloc.
+		 */
 		osb->osb_stackflags =
 			OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
-		strlcpy(osb->osb_cluster_stack,
+		memcpy(osb->osb_cluster_stack,
 		       OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
-		       OCFS2_STACK_LABEL_LEN + 1);
+		       OCFS2_STACK_LABEL_LEN);
 		if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
 			mlog(ML_ERROR,
 			     "couldn't mount because of an invalid "
@@ -2180,9 +2186,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
 			status = -EINVAL;
 			goto bail;
 		}
-		strlcpy(osb->osb_cluster_name,
+		memcpy(osb->osb_cluster_name,
 			OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
-			OCFS2_CLUSTER_NAME_LEN + 1);
+			OCFS2_CLUSTER_NAME_LEN);
 	} else {
 		/* The empty string is identical with classic tools that
 		 * don't know about s_cluster_info. */
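The strlcpy() calls above were replaced because strlcpy() runs strlen() over its source, and ci_stack/ci_cluster are fixed-width on-disk fields with no guaranteed NUL terminator, so the read could run past the field. Copying exactly the field width into a zero-initialized destination yields a terminated string without over-reading. A userspace sketch of the hazard and the fix (sizes hypothetical):

#include <stdio.h>
#include <string.h>

#define LABEL_LEN 4	/* stands in for OCFS2_STACK_LABEL_LEN */

int main(void)
{
	/* On-disk field: exactly LABEL_LEN bytes, not NUL-terminated. */
	const char ci_stack[LABEL_LEN] = { 'o', '2', 'c', 'b' };

	/* Destination is LABEL_LEN + 1 bytes and zero-initialized, like
	 * the kzalloc'ed osb fields, so the trailing NUL already exists. */
	char dst[LABEL_LEN + 1] = { 0 };

	/* strlcpy(dst, ci_stack, LABEL_LEN + 1) would strlen(ci_stack) and
	 * read past the 4-byte field hunting for a NUL; memcpy of exactly
	 * LABEL_LEN bytes never touches memory outside the field. */
	memcpy(dst, ci_stack, LABEL_LEN);

	printf("cluster stack: \"%s\"\n", dst);	/* prints o2cb */
	return 0;
}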
@@ -1827,9 +1827,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
 	if (mode_wp && mode_dontwake)
 		return -EINVAL;
 
-	ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
-				  uffdio_wp.range.len, mode_wp,
-				  &ctx->mmap_changing);
+	if (mmget_not_zero(ctx->mm)) {
+		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
+					  uffdio_wp.range.len, mode_wp,
+					  &ctx->mmap_changing);
+		mmput(ctx->mm);
+	} else {
+		return -ESRCH;
+	}
+
 	if (ret)
 		return ret;
 
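The userfaultfd fix above takes a reference on the mm with mmget_not_zero() before walking it, so an ioctl racing with exit_mmap() gets -ESRCH instead of operating on an address space that is being torn down. The underlying pattern is a conditional refcount acquire; a stdatomic sketch with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;	/* stands in for mm_struct's mm_users */
};

/* Take a reference only if the object is still live: once the count
 * has reached zero, teardown is in progress and must not be revived. */
static bool obj_get_not_zero(struct obj *o)
{
	int c = atomic_load(&o->users);

	while (c != 0)
		if (atomic_compare_exchange_weak(&o->users, &c, c + 1))
			return true;	/* got a reference */
	return false;			/* caller bails out, e.g. -ESRCH */
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->users, 1);	/* real code frees at zero */
}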
@@ -5376,7 +5376,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
  *	netdev and may otherwise be used by driver read-only, will be update
  *	by cfg80211 on change_interface
  * @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
  * @mgmt_registrations_need_update: mgmt registrations were updated,
  *	need to propagate the update to the driver
  * @mtx: mutex used to lock data in this struct, may be used by drivers
@@ -5423,7 +5422,6 @@ struct wireless_dev {
 	u32 identifier;
 
 	struct list_head mgmt_registrations;
-	spinlock_t mgmt_registrations_lock;
 	u8 mgmt_registrations_need_update:1;
 
 	struct mutex mtx;
@@ -54,7 +54,7 @@ struct mctp_sock {
 	struct sock	sk;
 
 	/* bind() params */
-	int		bind_net;
+	unsigned int	bind_net;
 	mctp_eid_t	bind_addr;
 	__u8		bind_type;
 
@@ -69,6 +69,10 @@ struct mptcp_out_options {
 		struct {
 			u64 sndr_key;
 			u64 rcvr_key;
+			u64 data_seq;
+			u32 subflow_seq;
+			u16 data_len;
+			__sum16 csum;
 		};
 		struct {
 			struct mptcp_addr_info addr;
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
 	 * Verification Tag value does not match the receiver's own
 	 * tag value, the receiver shall silently discard the packet...
 	 */
-	if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
-		return 1;
+	if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+		return 0;
 
 	chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
-	return 0;
+	return 1;
 }
 
 /* Check VTAG of the packet matches the sender's own tag and the T bit is
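The sctp_vtag_verify() rewrite above inverts the test so that the mismatch case is the early return: a packet whose verification tag differs is discarded (0) before any state is touched, and the transport's encap_port is now updated only for packets that passed the check. The control-flow shape, as a sketch with abbreviated names:

/* Validate first; side effects only on the success path. */
static int vtag_verify(unsigned int pkt_vtag, unsigned int my_vtag,
		       unsigned short *encap_port, unsigned short new_port)
{
	if (pkt_vtag != my_vtag)
		return 0;		/* silently discard, no side effects */

	*encap_port = new_port;		/* only valid packets update state */
	return 1;
}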
@@ -1208,7 +1208,7 @@ struct proto {
 #endif
 
 	bool			(*stream_memory_free)(const struct sock *sk, int wake);
-	bool			(*stream_memory_read)(const struct sock *sk);
+	bool			(*sock_is_readable)(struct sock *sk);
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(struct sock *sk);
 	void			(*leave_memory_pressure)(struct sock *sk);
@@ -2820,4 +2820,10 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
 
 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
 
+static inline bool sk_is_readable(struct sock *sk)
+{
+	if (sk->sk_prot->sock_is_readable)
+		return sk->sk_prot->sock_is_readable(sk);
+	return false;
+}
 #endif	/* _SOCK_H */
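The renamed hook and the new sk_is_readable() wrapper above put the "is there data to read" question behind struct proto, with a NULL hook meaning no protocol-specific answer. A userspace sketch of the same optional-callback dispatch (demo types, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct demo_sock;

struct demo_proto {
	/* optional; protocols without a hook leave this NULL */
	bool (*sock_is_readable)(struct demo_sock *sk);
};

struct demo_sock {
	const struct demo_proto *prot;
};

static bool demo_sk_is_readable(struct demo_sock *sk)
{
	if (sk->prot->sock_is_readable)
		return sk->prot->sock_is_readable(sk);
	return false;	/* no hook: treat as not readable */
}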
@@ -1576,6 +1576,7 @@ struct tcp_md5sig_key {
 	u8			keylen;
 	u8			family; /* AF_INET or AF_INET6 */
 	u8			prefixlen;
+	u8			flags;
 	union tcp_md5_addr	addr;
 	int			l3index; /* set if key added with L3 scope */
 	u8			key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1622,10 @@ struct tcp_md5sig_pool {
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
 			const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen, int l3index,
+		   int family, u8 prefixlen, int l3index, u8 flags,
 		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen, int l3index);
+		   int family, u8 prefixlen, int l3index, u8 flags);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 					 const struct sock *addr_sk);
 
@@ -358,6 +358,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
 		int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
 		  unsigned int optlen);
+void tls_err_abort(struct sock *sk, int err);
 
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
@@ -375,7 +376,7 @@ void tls_sw_release_resources_rx(struct sock *sk);
 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		   int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 			   struct pipe_inode_info *pipe,
 			   size_t len, unsigned int flags);
@@ -466,12 +467,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 #endif
 }
 
-static inline void tls_err_abort(struct sock *sk, int err)
-{
-	sk->sk_err = err;
-	sk_error_report(sk);
-}
-
 static inline bool tls_bigint_increment(unsigned char *seq, int len)
 {
 	int i;
@@ -512,7 +507,7 @@ static inline void tls_advance_record_sn(struct sock *sk,
 					 struct cipher_context *ctx)
 {
 	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
-		tls_err_abort(sk, EBADMSG);
+		tls_err_abort(sk, -EBADMSG);
 
 	if (prot->version != TLS_1_3_VERSION &&
 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
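tls_advance_record_sn() now passes -EBADMSG: sk->sk_err holds a positive errno while kernel internals pass errors around as negatives, and the tls_err_abort() that moved out of line (declared in the earlier hunk, inline copy removed above) presumably takes the negative form and stores its absolute value. A sketch of that sign convention:

#include <errno.h>
#include <stdio.h>

static int sk_err;	/* stands in for sock->sk_err, positive errno */

/* Expects a negative error, like the out-of-line tls_err_abort(). */
static void err_abort(int err)
{
	sk_err = -err;	/* -(-EBADMSG) == EBADMSG */
}

int main(void)
{
	err_abort(-EBADMSG);
	printf("sk_err = %d (EBADMSG = %d)\n", sk_err, EBADMSG);
	return 0;
}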
@@ -494,8 +494,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
 	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
 	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
-	 * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
-	 * specific case, where PARTIAL is both correct and required.
+	 * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
+	 * Reset in this specific case, where PARTIAL is both correct and
+	 * required.
 	 */
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		skb->ip_summed = CHECKSUM_PARTIAL;