Example of a shap error when using the Concatenate layer in Keras.

This post reproduces a shap DeepExplainer failure that occurs when a Keras model joins two inputs with a Concatenate layer. The full notebook, including package versions and the complete traceback, is included below.

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import keras\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import shap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.2.4\n",
      "1.15.4\n",
      "1.8.0\n",
      "0.25.1\n"
     ]
    }
   ],
   "source": [
    "print(keras.__version__) # 2.2.4\n",
    "print(np.__version__) # 1.15.4\n",
    "print(tf.__version__) # 1.8.0\n",
    "print(shap.__version__) # 0.25.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "n = 10\n",
    "p = 4\n",
    "X_1 = np.random.rand(n, p)\n",
    "X_2 = np.random.rand(n, p)\n",
    "y = np.random.rand(n)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/1\n",
      "\r",
      "10/10 [==============================] - 0s 10ms/step - loss: 0.5915 - mean_squared_error: 0.5915\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x12296d438>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from keras.layers import Input, Concatenate, Dense\n",
    "from keras.models import Model\n",
    "\n",
    "# Define inputs\n",
    "input_1 = Input(shape=(p,))\n",
    "input_2 = Input(shape=(p,))\n",
    "\n",
    "# Concatenate\n",
    "x = Concatenate()([input_1, input_2])\n",
    "output = Dense(1)(x)\n",
    "\n",
    "# Compile model\n",
    "model = Model(\n",
    "    inputs=[input_1, input_2], \n",
    "    outputs=output)\n",
    "model.compile(\n",
    "    loss='mean_squared_error', \n",
    "    optimizer=keras.optimizers.Adam(),\n",
    "    metrics=['mse'])\n",
    "\n",
    "# Train model\n",
    "model.fit([X_1, X_2], y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "ename": "AssertionError",
     "evalue": "1th input to concatenate_1/concat cannot vary!",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mInvalidArgumentError\u001b[0m                      Traceback (most recent call last)",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\u001b[0m in \u001b[0;36mget_attr\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m   2326\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0mc_api_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtf_buffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mbuf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2327\u001b[0;31m           \u001b[0mc_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_OperationGetAttrValueProto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_c_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbuf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2328\u001b[0m           \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mc_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mInvalidArgumentError\u001b[0m: Operation 'concatenate_1/concat' has no attr named '_XlaCompile'.",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py\u001b[0m in \u001b[0;36m_MaybeCompile\u001b[0;34m(scope, op, func, grad_fn)\u001b[0m\n\u001b[1;32m    379\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 380\u001b[0;31m       \u001b[0mxla_compile\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_attr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"_XlaCompile\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    381\u001b[0m       xla_separate_compiled_gradients = op.get_attr(\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\u001b[0m in \u001b[0;36mget_attr\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m   2330\u001b[0m         \u001b[0;31m# Convert to ValueError for backwards compatibility.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2331\u001b[0;31m         \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2332\u001b[0m       \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mattr_value_pb2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAttrValue\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: Operation 'concatenate_1/concat' has no attr named '_XlaCompile'.",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[0;31mAssertionError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-5-527f80281438>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0me\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mshap\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDeepExplainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mX_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mshap_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshap_values\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mX_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/__init__.py\u001b[0m in \u001b[0;36mshap_values\u001b[0;34m(self, X, ranked_outputs, output_rank_order)\u001b[0m\n\u001b[1;32m    113\u001b[0m         \u001b[0mwere\u001b[0m \u001b[0mchosen\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0;34m\"top\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    114\u001b[0m         \"\"\"\n\u001b[0;32m--> 115\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexplainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshap_values\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mranked_outputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_rank_order\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/deep_tf.py\u001b[0m in \u001b[0;36mshap_values\u001b[0;34m(self, X, ranked_outputs, output_rank_order)\u001b[0m\n\u001b[1;32m    248\u001b[0m                 \u001b[0;31m# run attribution computation graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    249\u001b[0m                 \u001b[0mfeature_ind\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel_output_ranks\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m                 \u001b[0msample_phis\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mphi_symbolic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeature_ind\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mjoint_input\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    251\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    252\u001b[0m                 \u001b[0;31m# assign the attributions to the right part of the output arrays\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/deep_tf.py\u001b[0m in \u001b[0;36mphi_symbolic\u001b[0;34m(self, i)\u001b[0m\n\u001b[1;32m    194\u001b[0m             \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    195\u001b[0m                 \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_output\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmulti_output\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_output\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 196\u001b[0;31m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mphi_symbolics\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_inputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    197\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    198\u001b[0m             \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py\u001b[0m in \u001b[0;36mgradients\u001b[0;34m(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients)\u001b[0m\n\u001b[1;32m    492\u001b[0m   \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_default_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_lock\u001b[0m\u001b[0;34m:\u001b[0m  \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    493\u001b[0m     return _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops,\n\u001b[0;32m--> 494\u001b[0;31m                             gate_gradients, aggregation_method, stop_gradients)\n\u001b[0m\u001b[1;32m    495\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    496\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py\u001b[0m in \u001b[0;36m_GradientsHelper\u001b[0;34m(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients)\u001b[0m\n\u001b[1;32m    634\u001b[0m                 \u001b[0;31m# functions.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    635\u001b[0m                 in_grads = _MaybeCompile(grad_scope, op, func_call,\n\u001b[0;32m--> 636\u001b[0;31m                                          lambda: grad_fn(op, *out_grads))\n\u001b[0m\u001b[1;32m    637\u001b[0m               \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    638\u001b[0m                 \u001b[0;31m# For function call ops, we add a 'SymbolicGradient'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py\u001b[0m in \u001b[0;36m_MaybeCompile\u001b[0;34m(scope, op, func, grad_fn)\u001b[0m\n\u001b[1;32m    383\u001b[0m       \u001b[0mxla_scope\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_attr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"_XlaScope\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    384\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 385\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# Exit early\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    386\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    387\u001b[0m   \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mxla_compile\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py\u001b[0m in \u001b[0;36m<lambda>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    634\u001b[0m                 \u001b[0;31m# functions.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    635\u001b[0m                 in_grads = _MaybeCompile(grad_scope, op, func_call,\n\u001b[0;32m--> 636\u001b[0;31m                                          lambda: grad_fn(op, *out_grads))\n\u001b[0m\u001b[1;32m    637\u001b[0m               \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    638\u001b[0m                 \u001b[0;31m# For function call ops, we add a 'SymbolicGradient'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/deep_tf.py\u001b[0m in \u001b[0;36mcustom_grad\u001b[0;34m(self, op, *grads)\u001b[0m\n\u001b[1;32m    273\u001b[0m         \"\"\" Passes a gradient op creation request to the correct handler.\n\u001b[1;32m    274\u001b[0m         \"\"\"\n\u001b[0;32m--> 275\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mop_handlers\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mgrads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    276\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    277\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/deep_tf.py\u001b[0m in \u001b[0;36mhandler\u001b[0;34m(explainer, op, *grads)\u001b[0m\n\u001b[1;32m    472\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mlinearity_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_ind\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    473\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mhandler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexplainer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mgrads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 474\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mlinearity_1d_handler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_ind\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexplainer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mgrads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    475\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mhandler\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    476\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/tmp/shap-concat-bug/venv/lib/python3.6/site-packages/shap/explainers/deep/deep_tf.py\u001b[0m in \u001b[0;36mlinearity_1d_handler\u001b[0;34m(input_ind, explainer, op, *grads)\u001b[0m\n\u001b[1;32m    479\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    480\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mi\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0minput_ind\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 481\u001b[0;31m             \u001b[0;32massert\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mexplainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_inputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\"th input to \"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\" cannot vary!\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    482\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mexplainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0morig_grads\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mgrads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    483\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mAssertionError\u001b[0m: 1th input to concatenate_1/concat cannot vary!"
     ]
    }
   ],
   "source": [
    "e = shap.DeepExplainer(model, [X_1, X_2])\n",
    "shap_values = e.shap_values([X_1, X_2])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
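The traceback shows where the failure comes from: shap 0.25.1's DeepExplainer routes the gradient of the concatenate_1/concat op through a linearity_1d handler, which asserts that only one designated input of the op may depend on the data. Here both Input tensors feed the Concatenate layer, so the check on the second input fails with "1th input to concatenate_1/concat cannot vary!". A minimal workaround sketch, assuming the two feature blocks can simply be joined before they reach the graph (this sidesteps the assertion rather than fixing it inside shap; the model name flat_model is made up for this sketch):

import numpy as np
import keras
from keras.layers import Input, Dense
from keras.models import Model
import shap

n, p = 10, 4
X_1 = np.random.rand(n, p)
X_2 = np.random.rand(n, p)
y = np.random.rand(n)

# Join the feature blocks in numpy so the model has a single input and
# no multi-input Concatenate op appears in the TensorFlow graph.
X = np.concatenate([X_1, X_2], axis=1)

inp = Input(shape=(2 * p,))
out = Dense(1)(inp)
flat_model = Model(inputs=inp, outputs=out)
flat_model.compile(
    loss='mean_squared_error',
    optimizer=keras.optimizers.Adam(),
    metrics=['mse'])
flat_model.fit(X, y, epochs=1)

# With a single input there is no concat op for the linearity_1d handler
# to assert on, so DeepExplainer runs without the error above.
e = shap.DeepExplainer(flat_model, X)
shap_values = e.shap_values(X)

The columns of shap_values then line up with the concatenated matrix, so the first p columns belong to X_1 and the remaining p columns to X_2.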

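If the model has to keep its two separate inputs, a model-agnostic fallback is another option. A sketch assuming shap's KernelExplainer is acceptable despite being much slower than DeepExplainer; it reuses model, X_1, X_2, p, np and shap from the notebook above, and the wrapper f is a name introduced here for illustration:

# Hide the two-input signature behind a single-matrix prediction function;
# KernelExplainer only needs a callable f(X) -> predictions.
def f(X):
    return model.predict([X[:, :p], X[:, p:]]).flatten()

background = np.concatenate([X_1, X_2], axis=1)
explainer = shap.KernelExplainer(f, background)
kernel_shap_values = explainer.shap_values(background)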