Pandas Core Operations: A Comprehensive Reference

A quick reference for common Pandas operations.

Importing dependencies

# Import modules
import pymysql
import pandas as pd
import numpy as np
import time

# Database
from sqlalchemy import create_engine

# Visualization
import matplotlib.pyplot as plt
# On a Mac with a Retina display, the following line noticeably improves figure quality in Jupyter Notebook
%config InlineBackend.figure_format = 'retina'
# Fix Chinese text rendering in matplotlib (macOS)
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
# Display Chinese text (the font must be installed first, e.g. on AI Studio)
plt.rcParams['font.sans-serif'] = ['SimHei'] # Set the default font
plt.rcParams['axes.unicode_minus'] = False  # Render minus signs correctly
import seaborn as sns
# Render figures inline in the notebook
%matplotlib inline
import pyecharts

# Ignore version-related warnings
import warnings
warnings.filterwarnings("ignore")




Algorithm-related dependencies

# Data normalization
from sklearn.preprocessing import MinMaxScaler

# K-means clustering
from sklearn.cluster import KMeans
# DBSCAN clustering
from sklearn.cluster import DBSCAN
# Linear regression
from sklearn.linear_model import LinearRegression
# Logistic regression
from sklearn.linear_model import LogisticRegression
# Gaussian naive Bayes
from sklearn.naive_bayes import GaussianNB
# Train/test split
from sklearn.model_selection import train_test_split
# Accuracy metrics
from sklearn import metrics
# Classification report and mean squared error
from sklearn.metrics import classification_report, mean_squared_error
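A minimal sketch of how these imports typically fit together. X and y are placeholders for a feature matrix and target that are assumed to be loaded already; the split ratio and random seed are arbitrary.

# Minimal end-to-end sketch (X and y are assumed placeholders for features and target)
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)                  # scale features to [0, 1]
train_data, test_data, train_target, test_target = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42)    # 80/20 train/test split
model = LinearRegression()
model.fit(train_data, train_target)
pred = model.predict(test_data)
print(mean_squared_error(test_target, pred))        # evaluate with MSE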

Fetching data

from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://root:root@127.0.0.1:3306/ry?charset=utf8')

# Query the relevant table names and row counts after the insert
result_query_sql = "SELECT table_name, table_rows FROM information_schema.tables WHERE table_name LIKE 'log%%' ORDER BY table_rows DESC;"
df_result = pd.read_sql(result_query_sql, engine)
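Once the engine exists, individual tables can be pulled into DataFrames the same way. The table name below is only illustrative; substitute one of the log tables returned by the query above.

# Read one table into a DataFrame (table name is illustrative)
df_log = pd.read_sql("SELECT * FROM log_operation LIMIT 1000", engine)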






Creating a DataFrame

# list to DataFrame (pred and test_target are model predictions and ground-truth labels from earlier)
df_result = pd.DataFrame(pred,columns=['pred'])
df_result['actual'] = test_target
df_result

# Take a sub-DataFrame (a subset of columns)
df_new = df_old[['col1','col2']]

# dict to DataFrame
df_test = pd.DataFrame({'A':[0.587221, 0.135673, 0.135673, 0.135673, 0.135673], 
                        'B':['a', 'b', 'c', 'd', 'e'],
                        'C':[1, 2, 3, 4, 5]})

# Specify column names
data = pd.DataFrame(dataset.data, columns=dataset.feature_names)

# Use numpy to generate 20 numbers from a given distribution (e.g. the standard normal)
tem = np.random.normal(0, 1, 20)
df3 = pd.DataFrame(tem)

# Generate a DataFrame of random integers with the same length as df (135 rows here)
df1 = pd.DataFrame(pd.Series(np.random.randint(1, 10, 135)))
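A list of dicts (records) also converts directly to a DataFrame; the values below are made up for illustration.

# list of dicts (records) to DataFrame; values are made up
records = [{'name': 'a', 'score': 1}, {'name': 'b', 'score': 2}]
df_records = pd.DataFrame(records)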

Renaming columns

# Rename a column
data_scaled = data_scaled.rename(columns={'本体油位': 'OILLV'})

Adding columns

# Parse a string time column into datetime and store it as a new column
df_jj2yyb['r_time'] = pd.to_datetime(df_jj2yyb['cTime'])

# Add a column that bins salary into 3 groups
bins = [0, 5000, 20000, 50000]
group_names = ['低', '中', '高']
df['categories'] = pd.cut(df['salary'], bins, labels=group_names)
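As a quick sanity check of how the rows fell into the bins, the new categorical column can be counted (this reuses the categories column created just above).

# Count how many rows landed in each salary bin
df['categories'].value_counts()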

Handling missing values

# Check whether the data contains any missing values
df.isnull().values.any()

# Count missing values in each column
df.isnull().sum()

# Extract the rows where a given column is null
df[df['日期'].isnull()]

# Print the exact row indices with missing values for each column
for i in df.columns:
    if df[i].count() != len(df):
        row = df[i][df[i].isnull().values].index.tolist()
        print('Column "{}" has missing values at rows {}'.format(i, row))

# Fill with the mode
heart_df['Thal'].fillna(heart_df['Thal'].mode(dropna=True)[0], inplace=True)

# Fill nulls in continuous (float) columns with the median
dfcolumns = heart_df_encoded.columns.values.tolist()
for item in dfcolumns:
    if heart_df_encoded[item].dtype == 'float':
        heart_df_encoded[item].fillna(heart_df_encoded[item].median(), inplace=True)
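If dropping rows or columns is acceptable instead of filling, dropna covers the common cases; this is a short sketch using the same '日期' column as above.

# Drop rows containing any missing value
df.dropna()
# Drop rows only when specific columns are missing
df.dropna(subset=['日期'])
# Drop columns that are entirely empty
df.dropna(axis=1, how='all')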

One-hot encoding

df_encoded = pd.get_dummies(df_data)
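A small illustration of what get_dummies produces, using a made-up frame: string columns are expanded into 0/1 indicator columns, and the columns/prefix parameters restrict and label the encoding.

# Made-up example frame
df_demo = pd.DataFrame({'color': ['red', 'green', 'red'], 'size': [1, 2, 3]})
pd.get_dummies(df_demo)
# Encode only specific columns and add a prefix to the generated column names
pd.get_dummies(df_demo, columns=['color'], prefix='col')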

Replacing values

# Replace values in a column using a mapping
num_encode = {
    'AHD': {'No':0, "Yes":1},
}
heart_df.replace(num_encode,inplace=True)

Dropping columns

# Drop several columns in place
df_jj2.drop(['coll_time', 'polar', 'conn_type', 'phase', 'id', 'Unnamed: 0'],axis=1,inplace=True)

groupby

# 0. Load the iris dataset from sklearn
from sklearn import datasets
# Load the features and the target
data, target = datasets.load_iris(return_X_y=True, as_frame=True)
# Concatenate the features and the target
iris = pd.concat([data, target], axis=1, sort=False)
iris

# Create a groupby object
iris_gb = iris.groupby('target')

# 1. Frequency table: how many samples fall in each class
iris_gb.size()

# 2. Common descriptive statistics
# min, max, median, std, etc.
# Mean
iris_gb.mean()
# A single column
iris_gb['sepal length (cm)'].mean()
# Two columns
iris_gb[['sepal length (cm)', 'sepal width (cm)']].mean()

# 3. Index of the maximum (or minimum) value
iris_gb.idxmax()

# Select the rows where sepal length is largest within each group
sepal_largest = iris.loc[iris_gb['sepal length (cm)'].idxmax()]

# 4. Reset the index after a groupby
iris_gb.max().reset_index()
# The line above and the line below are equivalent
iris.groupby('target', as_index=False).max()

# 5. Several statistics at once with the aggregation function agg
iris_gb[['sepal length (cm)', 'sepal width (cm)']].agg(["min", "mean"])

# 6. Column-specific aggregation
# Apply different statistics to different columns
iris_gb.agg({"sepal length (cm)": ["min", "max"], "sepal width (cm)": ["mean", "std"]})

# 7. Named aggregations with NamedAgg
# Combine each statistic and column into a named output column using NamedAgg

iris_gb.agg(
    sepal_min=pd.NamedAgg(column="sepal length (cm)", aggfunc="min"),
    sepal_max=pd.NamedAgg(column="sepal length (cm)", aggfunc="max"),
    petal_mean=pd.NamedAgg(column="petal length (cm)", aggfunc="mean"),
    petal_std=pd.NamedAgg(column="petal length (cm)", aggfunc="std")
)

# The tuple form below is more concise
iris_gb.agg(
    sepal_min=("sepal length (cm)", "min"),
    sepal_max=("sepal length (cm)", "max"),
    petal_mean=("petal length (cm)", "mean"),
    petal_std=("petal length (cm)", "std")
)

# 8. Custom functions
iris_gb.agg(pd.Series.mean)
# Names and function objects can also be mixed
iris_gb.agg(["min", pd.Series.mean])
# User-defined functions work as well
def double_length(x):
    return 2*x.mean()

iris_gb.agg(double_length)
# A lambda is even more concise; the usage is flexible and these styles can be combined freely
iris_gb.agg(lambda x: x.mean())

Pivot tables

import numpy as np
import pandas as pd
import seaborn as sns
titanic = sns.load_dataset('titanic')

titanic.pivot_table(index='sex', columns='class')

# By default every column is aggregated; pass the values parameter to compute only what you need
age = pd.cut(titanic["age"], [0, 18, 80])  # Bin the age column to make the table easier to read
titanic.pivot_table(index=['sex', age], columns='class', values=['survived','fare'])

# In practice the mean is not always what you want; use aggfunc to specify the aggregation functions
titanic.pivot_table(index='sex', columns='class', aggfunc={'survived':sum, 'fare':'mean'})

# Use the margins parameter to add totals for each group;
# the margin label can be customized with margins_name (default "All").
titanic.pivot_table('survived', index='sex', columns='class', margins=True)
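For example, the margin label mentioned above can be renamed:

# Rename the margins row/column from the default "All"
titanic.pivot_table('survived', index='sex', columns='class', margins=True, margins_name='Total')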

Filtering data

# Take the 33rd row (position 32)
df.iloc[32]

# Rows where a column starts with a given string
df_jj2 = df_512.loc[df_512["transformer"].str.startswith('JJ2')]

# Rows where a column equals a given value
df_jj2yya = df_jj2.loc[df_jj2["变压器编号"]=='JJ2YYA']

# Values in the first column that do not appear in the second column
df['col1'][~df['col1'].isin(df['col2'])]

# Row positions where two columns are equal
np.where(df.secondType == df.thirdType)

# Rows whose value contains a substring
results = df['grammer'].str.contains("Python")

# Get the column names
df.columns

# Number of distinct values in a column
df['education'].nunique()

# Drop duplicate rows
df.drop_duplicates(inplace=True)

# Rows where a column equals a given value
df[df.col_name==0.587221]
# df.col_name==0.587221 on its own returns a boolean Series (True/False per row)

# Distinct values of a column together with their counts
df_jj2["变压器编号"].value_counts()

# Filter by time range
df_jj2yyb_0501_0701 = df_jj2yyb[(df_jj2yyb['r_time'] >= pd.to_datetime('20200501')) & (df_jj2yyb['r_time'] <= pd.to_datetime('20200701'))]

# Filter by numeric range (see the query() sketch after this block)
df[(df['popularity'] > 3) & (df['popularity'] < 7)]

# Select columns by data type
df = pd.DataFrame({'a': [1, 2] * 3,
                   'b': [True, False] * 3,
                   'c': [1.0, 2.0] * 3})
print('df:', df)

# Columns with bool dtype
print('Columns with bool dtype:', df.select_dtypes(include='bool'))

# Columns with float dtype
print('Columns with float dtype:', df.select_dtypes(include=['float64']))

# Columns excluding integer dtype
print('Columns excluding int64 dtype:', df.select_dtypes(exclude=['int64']))

# Slice the string values of a column
df['Time'].str[0:8]

# Randomly sample num rows
ins_1 = df.sample(n=num)

# Drop duplicates based on a column
df.drop_duplicates(['grammer'])

# Sort by a column (descending)
df.sort_values("popularity",inplace=True, ascending=False)

# Rows where a column takes its maximum value
df[df['popularity'] == df['popularity'].max()]

# Top num rows by a column
df.nlargest(num,'col_name')
# Horizontal bar chart of the 10 largest values in a column
df['col_name'].nlargest(10).plot(kind='barh')
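The numeric-range filter above can also be written with query(), which some find more readable; this is simply an equivalent form of the boolean-mask version.

# Equivalent to df[(df['popularity'] > 3) & (df['popularity'] < 7)]
df.query('popularity > 3 and popularity < 7')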






Difference calculations

# axis=0 (or 'index') shifts vertically; periods is the number of steps to shift: positive shifts down, negative shifts up.
print(df.diff(periods=1, axis='index'))
print(df.diff(periods=-1, axis=0))
# axis=1 (or 'columns') shifts horizontally: positive shifts right, negative shifts left.
print(df.diff(periods=1, axis='columns'))
print(df.diff(periods=-1, axis=1))

# Rate of change (percentage change)
data['收盘价(元)'].pct_change()

# Rolling window of 5 values, taking the mean over each window
df['收盘价(元)'].rolling(5).mean()
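A tiny worked example of diff with made-up numbers, to show what the shift direction means: each value has the value one row above it subtracted.

# Made-up example: each value minus the value one row above it
df_demo = pd.DataFrame({'x': [1, 4, 9, 16]})
df_demo.diff(periods=1, axis=0)
#      x
# 0  NaN
# 1  3.0
# 2  5.0
# 3  7.0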

Modifying data

# Drop the last row
df = df.drop(labels=df.shape[0]-1)

# Append a row ['Perl', 6.6] (see the pd.concat note after this block)
row = {'grammer':'Perl','popularity':6.6}
df = df.append(row,ignore_index=True)

# Format a decimal column as a percentage
df.style.format({'data': '{0:.2%}'.format})

# Reverse the row order
df.iloc[::-1, :]

# Build a pivot table from two columns
pd.pivot_table(df,values=["salary","score"],index="positionId")

# Aggregate two columns at the same time
df[["salary","score"]].agg([np.sum,np.mean,np.min])

# Apply different aggregations to different columns
df.agg({"salary":np.sum,"score":np.mean})=pd.to_datetime('20200501')) & (df_jj2yyb['r_time'] <= pd.to_datetime('20200701'))]\n\n# 数值筛选\ndf[(df['popularity'] > 3) & (df['popularity'] < 7)]\n\n# 按数据类型选择列\ndf = pd.DataFrame({'a': [1, 2] * 3,\n 'b': [True, False] * 3,\n 'c': [1.0, 2.0] * 3})\nprint('df:', df)\n\n# 输出包含 bool 数据类型的列\nprint('输出包含 bool 数据类型的列:', df.select_dtypes(include='bool'))\n\n# 输出包含小数数据类型的列\nprint('输出包含小数数据类型的列:', df.select_dtypes(include=['float64']))\n\n# 输出排除整数的列\nprint('输出包含小数数据类型的列:', df.select_dtypes(exclude=['int64']))\n\n# 某列字符串截取\ndf['Time'].str[0:8]\n\n# 随机取num行\nins_1 = df.sample(n=num)\n\n# 数据去重\ndf.drop_duplicates(['grammer'])\n\n# 按某列排序(降序)\ndf.sort_values(\"popularity\",inplace=True, ascending=False)\n\n# 取某列最大值所在行\ndf[df['popularity'] == df['popularity'].max()]\n\n# 取某列最大num行\ndf.nlargest(num,'col_name')\n# 最大num列画横向柱形图\ndf.nlargest(10).plot(kind='barh')"}}},"language":"python","caption":{"text":{"apool":{"nextNum":0,"numToAttrib":{}},"initialAttributedTexts":{"attribs":{"0":"|1+1"},"text":{"0":"\n"}}}}}},"TpBWdOlbWoX6EAxHU5cc3Fo8n0E":{"id":"TpBWdOlbWoX6EAxHU5cc3Fo8n0E","snapshot":{"parent_id":"Mpfhd8cvIop66FxRD6McJJpLnFc","author":"7434804997847990275","align":"center","type":"image","comments":[],"revisions":[],"locked":false,"hidden":false,"image":{"token":"WIGybBZpgoWiN3xI48ochLAbnCg","size":37993,"width":502,"rotation":0,"mimeType":"image/png","scale":1,"height":258,"name":"image.png","crop":[0,0,0,0],"caption":{"text":{"apool":{"nextNum":0,"numToAttrib":null},"initialAttributedTexts":{"attribs":null,"text":null}}}}}},"XtBEdAl4SoM7XHxFIpGc5kWinrb":{"id":"XtBEdAl4SoM7XHxFIpGc5kWinrb","snapshot":{"locked":false,"text":{"apool":{"nextNum":1,"numToAttrib":{"0":["author","7434804997847990275"]}},"initialAttributedTexts":{"attribs":{"0":"*0+4"},"text":{"0":"差值计算"}}},"folded":false,"hidden":false,"author":"7434804997847990275","children":[],"align":"","type":"heading3","parent_id":"Mpfhd8cvIop66FxRD6McJJpLnFc","comments":[],"revisions":[]}},"WuYcdlhMfosJWgxYp7BcM476nab":{"id":"WuYcdlhMfosJWgxYp7BcM476nab","snapshot":{"locked":false,"wrap":false,"revisions":[],"folded":false,"text":{"apool":{"nextNum":1,"numToAttrib":{"0":["author","7434804997847990275"]}},"initialAttributedTexts":{"attribs":{"0":"*0|b+97*0+u"},"text":{"0":"# axis=0或index表示上下移动, periods表示移动的次数,为正时向下移,为负时向上移动。\nprint(df.diff( periods=1, axis=‘index‘))\nprint(df.diff( periods=-1, axis=0))\n# axis=1或columns表示左右移动,periods表示移动的次数,为正时向右移,为负时向左移动。\nprint(df.diff( periods=1, axis=‘columns‘))\nprint(df.diff( periods=-1, axis=1))\n\n# 变化率计算\ndata['收盘价(元)'].pct_change()\n\n# 
以5个数据作为一个数据滑动窗口,在这个5个数据上取均值\ndf['收盘价(元)'].rolling(5).mean()"}}},"comments":[],"hidden":false,"align":"","caption":{"text":{"apool":{"nextNum":0,"numToAttrib":{}},"initialAttributedTexts":{"attribs":{"0":"|1+1"},"text":{"0":"\n"}}}},"parent_id":"Mpfhd8cvIop66FxRD6McJJpLnFc","author":"7434804997847990275","children":[],"language":"bash","type":"code"}},"YFUSd0MPpoqtYYxp0cyc4n5Cn9e":{"id":"YFUSd0MPpoqtYYxp0cyc4n5Cn9e","snapshot":{"children":[],"align":"","folded":false,"revisions":[],"author":"7434804997847990275","comments":[],"locked":false,"hidden":false,"text":{"apool":{"nextNum":1,"numToAttrib":{"0":["author","7434804997847990275"]}},"initialAttributedTexts":{"attribs":{"0":"*0+4"},"text":{"0":"数据修改"}}},"type":"heading3","parent_id":"Mpfhd8cvIop66FxRD6McJJpLnFc"}},"K48WdWzIyoad3zxY0fucUEBgnyf":{"id":"K48WdWzIyoad3zxY0fucUEBgnyf","snapshot":{"comments":[],"revisions":[],"caption":{"text":{"apool":{"nextNum":0,"numToAttrib":{}},"initialAttributedTexts":{"attribs":{"0":"|1+1"},"text":{"0":"\n"}}}},"parent_id":"Mpfhd8cvIop66FxRD6McJJpLnFc","locked":false,"wrap":false,"language":"python","type":"code","hidden":false,"author":"7434804997847990275","children":[],"folded":false,"text":{"initialAttributedTexts":{"attribs":{"0":"*0|k+an*0+15"},"text":{"0":"# 删除最后一行\ndf = df.drop(labels=df.shape[0]-1)\n\n# 添加一行数据['Perl',6.6]\nrow = {'grammer':'Perl','popularity':6.6}\ndf = df.append(row,ignore_index=True)\n\n# 某列小数转百分数\ndf.style.format({'data': '{0:.2%}'.format})\n\n# 反转行\ndf.iloc[::-1, :]\n\n# 以两列制作数据透视\npd.pivot_table(df,values=[\"salary\",\"score\"],index=\"positionId\")\n\n# 同时对两列进行计算\ndf[[\"salary\",\"score\"]].agg([np.sum,np.mean,np.min])\n\n# 对不同列执行不同的计算\ndf.agg({\"salary\":np.sum,\"score\":np.mean})"}},"apool":{"nextNum":1,"numToAttrib":{"0":["author","7434804997847990275"]}}},"align":""}},"Mpfhd8cvIop66FxRD6McJJpLnFc":{"id":"Mpfhd8cvIop66FxRD6McJJpLnFc","snapshot":{"revisions":null,"locked":false,"text":{"apool":{"nextNum":1,"numToAttrib":{"0":["author","7434804997847990275"]}},"initialAttributedTexts":{"attribs":{"0":"*0+f"},"text":{"0":"Pandas 
核心操作知识大全"}}},"align":"","type":"page","parent_id":"","comments":null,"hidden":false,"author":"7434804997847990275","children":["OySmdebK1oaU8rxWsoscdgU8nUe","ZbsIdmlaZouN4lx0SCQc0dkzn1c","SWztdn6sYoHXrGxbN4Jc03PUnTb","TwwCdht4aon31YxYr0ucccWYn9c","E6IIdddtsoTT7mx1OgTc44PxnFd","Dt4Wd6IoToSXryxheyrcuhEVnBd","RTIIdJlC2ooxAaxV3Qnc0PkwnVe","OCgddwnt5oZclvx3K4Rc96EUndb","Zm1td5k9Uowvxqxg1BNcu6Lenvg","IGmhdOqoOoXjmFxu9JVcE8A7nUc","VtTGd9geuoq3X3x3ZUWcFnkfn1b","CxMrdF6pHo9fB2xjDv7cXy76nnc","KSwZdbB0GoFpJUxbjWQclSEMnRg","K6pKdiZhzocvByxSsYYcyvnEnbh","AjymduAHRotJrlxeXZ5cDSoLnMM","Mk2zd5Hm6oEANFxDJyecNwPRn1c","OmRYdrhT3odFnrxZARqcC0jZnme","H43qdiEEVo7Xd8xL3facD51knge","Qbs0dGUS6ozyShxnBDBcBmCOnWd","RFjwd3cAlopPUVxK9jXci3Xtnqc","VYtpdUaE2okgQbxizPbcAI6Dnte","HR5mdHSixoKJ6Zx2raQcBmSGnFK","DoS8dM0URolAI5xMLSacq8Q8nMd","MCa4dU142oi1gqxf2AZcz0JMnth","IVBNddnbioGWMzxLVnWcDJQUnQX","EDwrd69XAorjsox3QHjcV6ognZg","N2gWdpm8doCcvPxViGyciHJZnag","XS6Qd3TveoHXrsxNq7cchQTtnGe","QhUQdYKGGoNUgWx9ijbckFDansp","NoP1dUzlmoz8QOxzRbXcgkQ3nBg","TpBWdOlbWoX6EAxHU5cc3Fo8n0E","XtBEdAl4SoM7XHxFIpGc5kWinrb","WuYcdlhMfosJWgxYp7BcM476nab","YFUSd0MPpoqtYYxp0cyc4n5Cn9e","K48WdWzIyoad3zxY0fucUEBgnyf"],"doc_info":{"editors":["7434804997847990275"],"options":["editors","edit_time"],"deleted_editors":null,"option_modified":null}}}},"payloadMap":{},"extra":{"channel":"saas","pasteRandomId":"3fea2e96-3af4-4f1d-a551-4568fabf2411","mention_page_title":{},"external_mention_url":{},"isEqualBlockSelection":true},"isKeepQuoteContainer":false,"selection":[{"id":3334,"type":"block","recordId":"OySmdebK1oaU8rxWsoscdgU8nUe"},{"id":3335,"type":"block","recordId":"ZbsIdmlaZouN4lx0SCQc0dkzn1c"},{"id":3336,"type":"block","recordId":"SWztdn6sYoHXrGxbN4Jc03PUnTb"},{"id":3337,"type":"block","recordId":"TwwCdht4aon31YxYr0ucccWYn9c"},{"id":3338,"type":"block","recordId":"E6IIdddtsoTT7mx1OgTc44PxnFd"},{"id":3339,"type":"block","recordId":"Dt4Wd6IoToSXryxheyrcuhEVnBd"},{"id":3340,"type":"block","recordId":"RTIIdJlC2ooxAaxV3Qnc0PkwnVe"},{"id":3341,"type":"block","recordId":"OCgddwnt5oZclvx3K4Rc96EUndb"},{"id":3342,"type":"block","recordId":"Zm1td5k9Uowvxqxg1BNcu6Lenvg"},{"id":3343,"type":"block","recordId":"IGmhdOqoOoXjmFxu9JVcE8A7nUc"},{"id":3344,"type":"block","recordId":"VtTGd9geuoq3X3x3ZUWcFnkfn1b"},{"id":3345,"type":"block","recordId":"CxMrdF6pHo9fB2xjDv7cXy76nnc"},{"id":3346,"type":"block","recordId":"KSwZdbB0GoFpJUxbjWQclSEMnRg"},{"id":3347,"type":"block","recordId":"K6pKdiZhzocvByxSsYYcyvnEnbh"},{"id":3348,"type":"block","recordId":"AjymduAHRotJrlxeXZ5cDSoLnMM"},{"id":3349,"type":"block","recordId":"Mk2zd5Hm6oEANFxDJyecNwPRn1c"},{"id":3350,"type":"block","recordId":"OmRYdrhT3odFnrxZARqcC0jZnme"},{"id":3351,"type":"block","recordId":"H43qdiEEVo7Xd8xL3facD51knge"},{"id":3352,"type":"block","recordId":"Qbs0dGUS6ozyShxnBDBcBmCOnWd"},{"id":3353,"type":"block","recordId":"RFjwd3cAlopPUVxK9jXci3Xtnqc"},{"id":3354,"type":"block","recordId":"VYtpdUaE2okgQbxizPbcAI6Dnte"},{"id":3355,"type":"block","recordId":"HR5mdHSixoKJ6Zx2raQcBmSGnFK"},{"id":3356,"type":"block","recordId":"DoS8dM0URolAI5xMLSacq8Q8nMd"},{"id":3357,"type":"block","recordId":"MCa4dU142oi1gqxf2AZcz0JMnth"},{"id":3358,"type":"block","recordId":"IVBNddnbioGWMzxLVnWcDJQUnQX"},{"id":3359,"type":"block","recordId":"EDwrd69XAorjsox3QHjcV6ognZg"},{"id":3360,"type":"block","recordId":"N2gWdpm8doCcvPxViGyciHJZnag"},{"id":3361,"type":"block","recordId":"XS6Qd3TveoHXrsxNq7cchQTtnGe"},{"id":3362,"type":"block","recordId":"QhUQdYKGGoNUgWx9ijbckFDansp"},{"id":3363,"type":"block","recordId":"NoP1dUzlmoz8QOxzRb
XcgkQ3nBg"},{"id":3364,"type":"block","recordId":"TpBWdOlbWoX6EAxHU5cc3Fo8n0E"},{"id":3365,"type":"block","recordId":"XtBEdAl4SoM7XHxFIpGc5kWinrb"},{"id":3366,"type":"block","recordId":"WuYcdlhMfosJWgxYp7BcM476nab"},{"id":3367,"type":"block","recordId":"YFUSd0MPpoqtYYxp0cyc4n5Cn9e"},{"id":3368,"type":"block","recordId":"K48WdWzIyoad3zxY0fucUEBgnyf"}],"pasteFlag":"964c81ce-1b4c-424c-88b0-c3c2804b451f"}" data-lark-record-format="docx/record" class="lark-record-clipboard">
