public class ColumnTruncateWork extends MapWork implements java.io.Serializable
Fields inherited from class MapWork: SAMPLING_ON_PREV_MR, SAMPLING_ON_START

| Constructor and Description |
|---|
ColumnTruncateWork() |
ColumnTruncateWork(java.util.List<java.lang.Integer> droppedColumns,
java.lang.String inputDir,
java.lang.String outputDir) |
ColumnTruncateWork(java.util.List<java.lang.Integer> droppedColumns,
java.lang.String inputDir,
java.lang.String outputDir,
boolean hasDynamicPartitions,
DynamicPartitionCtx dynPartCtx) |
| Modifier and Type | Method and Description |
|---|---|
java.util.List<java.lang.Integer> |
getDroppedColumns() |
DynamicPartitionCtx |
getDynPartCtx() |
java.lang.String |
getInputDir() |
java.lang.String |
getInputformat() |
ListBucketingCtx |
getListBucketingCtx() |
java.lang.Class<? extends Mapper> |
getMapperClass() |
java.lang.Long |
getMinSplitSize() |
java.lang.String |
getOutputDir() |
boolean |
hasDynamicPartitions() |
boolean |
isGatheringStats() |
boolean |
isListBucketingAlterTableConcatenate() |
void |
setDroppedColumns(java.util.List<java.lang.Integer> droppedColumns) |
void |
setDynPartCtx(DynamicPartitionCtx dynPartCtx) |
void |
setHasDynamicPartitions(boolean hasDynamicPartitions) |
void |
setInputPaths(java.lang.String inputDir) |
void |
setListBucketingCtx(ListBucketingCtx listBucketingCtx) |
void |
setOutputDir(java.lang.String outputDir) |
Methods inherited from class MapWork: addIndexIntermediateFile, addMapWork, configureJobConf, deriveExplainAttributes, getAliases, getAliasToPartnInfo, getAliasToWork, getAllRootOperators, getBucketedColsByDirectory, getHadoopSupportsSplittable, getIndexIntermediateFile, getJoinTree, getMapLocalWork, getMaxSplitSize, getMinSplitSizePerNode, getMinSplitSizePerRack, getNameToSplitSample, getNumMapTasks, getOpParseCtxMap, getPartitionDescs, getPaths, getPathToAliases, getPathToPartitionInfo, getSamplingType, getSamplingTypeString, getSortedColsByDirectory, getTmpHDFSFileURI, getTruncatedPathToAliases, getWorks, initialize, isInputFormatSorted, isMapperCannotSpanPartns, isUseBucketizedHiveInputFormat, mergeAliasedInput, mergingInto, resolveDynamicPartitionStoredAsSubDirsMerge, setAliasToPartnInfo, setAliasToWork, setHadoopSupportsSplittable, setInputformat, setInputFormatSorted, setJoinTree, setMapLocalWork, setMapperCannotSpanPartns, setMaxSplitSize, setMinSplitSize, setMinSplitSizePerNode, setMinSplitSizePerRack, setNameToSplitSample, setNumMapTasks, setOpParseCtxMap, setPathToAliases, setPathToPartitionInfo, setSamplingType, setTmpHDFSFileURI, setUseBucketizedHiveInputFormat

Methods inherited from class BaseWork: getAllOperators, setGatheringStats

Methods inherited from class java.lang.Object: clone

public ColumnTruncateWork()
public ColumnTruncateWork(java.util.List<java.lang.Integer> droppedColumns,
java.lang.String inputDir,
java.lang.String outputDir)
public ColumnTruncateWork(java.util.List<java.lang.Integer> droppedColumns,
java.lang.String inputDir,
java.lang.String outputDir,
boolean hasDynamicPartitions,
DynamicPartitionCtx dynPartCtx)
public java.lang.String getInputDir()
public void setInputPaths(java.lang.String inputDir)
public java.lang.String getOutputDir()
public void setOutputDir(java.lang.String outputDir)
public java.lang.Class<? extends Mapper> getMapperClass()
public java.lang.Long getMinSplitSize()
Overrides: getMinSplitSize in class MapWork

public java.lang.String getInputformat()
Overrides: getInputformat in class MapWork

public boolean isGatheringStats()
Overrides: isGatheringStats in class BaseWork

public boolean hasDynamicPartitions()
public void setHasDynamicPartitions(boolean hasDynamicPartitions)
public DynamicPartitionCtx getDynPartCtx()
public void setDynPartCtx(DynamicPartitionCtx dynPartCtx)
public ListBucketingCtx getListBucketingCtx()
public void setListBucketingCtx(ListBucketingCtx listBucketingCtx)
Parameters: listBucketingCtx - the listBucketingCtx to set

public boolean isListBucketingAlterTableConcatenate()
public java.util.List<java.lang.Integer> getDroppedColumns()
public void setDroppedColumns(java.util.List<java.lang.Integer> droppedColumns)
Copyright © 2012 The Apache Software Foundation