#include <stdlib.h>
#include <stdio.h>

struct node;
typedef struct node* PtrToNode;

/* Each node stores one base-10000 "cluster" (four decimal digits),
 * least significant cluster first. */
struct node {
    long long n;
    PtrToNode next;
};

PtrToNode factorial(int n, PtrToNode init);
void multiply(long long n, PtrToNode init, long long carry);
PtrToNode reverse(PtrToNode head);

int main() {
    int n;
    while (1) {
        scanf("%d", &n);
        if (n > 0) {
            break;
        } else if (n == 0) {
            printf("1\n");
            return 0;
        } else {
            printf("Retry.\n");
        }
    }

    /* Start with a single cluster holding 0! = 1. */
    PtrToNode init = malloc(sizeof(struct node));
    init->n = 1;
    init->next = NULL;

    PtrToNode head = factorial(n, init);

    /* Reverse so the most significant cluster comes first for printing. */
    PtrToNode tail = reverse(head);
    PtrToNode backup = tail;

    printf("%lld", tail->n);          /* leading cluster: no zero padding */
    tail = tail->next;
    while (tail != NULL) {
        printf("%04lld", tail->n);    /* remaining clusters: exactly four digits */
        tail = tail->next;
    }

    /* Free the list. */
    PtrToNode t;
    while (backup != NULL) {
        t = backup;
        backup = backup->next;
        free(t);
    }
}

PtrToNode factorial(int n, PtrToNode init) {
    for (int i = 1; i <= n; i++)
        multiply(i, init, 0);
    return init;
}

/* Multiply the whole list by n, propagating the carry cluster by cluster. */
void multiply(long long n, PtrToNode init, long long carry) {
    long long temp = n * init->n + carry;
    init->n = temp % 10000;
    carry = temp / 10000;
    if (init->next != NULL) {
        multiply(n, init->next, carry);
    } else if (carry > 0) {
        /* Append a new most significant cluster holding the remaining carry. */
        init->next = malloc(sizeof(struct node));
        init->next->n = carry;
        init->next->next = NULL;
    }
}

/* Reverse the list in place (this helper is referenced above but was not
 * included in the posted code; it is needed for the program to compile). */
PtrToNode reverse(PtrToNode head) {
    PtrToNode prev = NULL;
    while (head != NULL) {
        PtrToNode next = head->next;
        head->next = prev;
        prev = head;
        head = next;
    }
    return prev;
}
This is my program to calculate 10000 factorial, and the result matches the value I found online. But when I limit each node's n to the range [0, 999], I can't even calculate 2000 factorial correctly. Why is it that when I change the range of n to [0, 9999], it can calculate 2000! and even 10000!?
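Presumably the "[0, 999]" variant changes only the cluster base in multiply: each node keeps three decimal digits, so the splits use 1000 instead of 10000 and the padded print becomes "%03lld". The sketch below is a reconstruction of that change based on the question, reusing the struct node / PtrToNode definitions above; multiply_base1000 is just an illustrative name, not something from the original program.

void multiply_base1000(long long n, PtrToNode init, long long carry) {
    long long temp = n * init->n + carry;
    init->n = temp % 1000;      /* keep the low three digits in this cluster */
    carry = temp / 1000;        /* pass everything above them up the chain */
    if (init->next != NULL) {
        multiply_base1000(n, init->next, carry);
    } else if (carry > 0) {
        init->next = malloc(sizeof(struct node));
        init->next->n = carry;  /* the raw carry is stored without reduction */
        init->next->next = NULL;
    }
}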
The value of 10,000! is approximately 2.846 × 10^35659, so the factorial of any integer greater than 10,000 will be at least 10^35663.

Why does the conventional way of computing a factorial fail for large numbers? The factorial of 100 already has 158 digits; it is not possible to store that many digits even in a long int, which is why the digits have to be split across a list of small clusters in the first place.
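Even the widest built-in integer type runs out of room almost immediately. The short, self-contained check below (not part of the original program) shows that an unsigned 64-bit integer still holds 20! but silently wraps around at 21!:

#include <stdio.h>

int main(void) {
    unsigned long long f = 1;
    for (unsigned long long i = 1; i <= 21; i++) {
        f *= i;                           /* wraps modulo 2^64 once it overflows */
        printf("%2llu! = %llu\n", i, f);  /* everything past 20! is garbage */
    }
    return 0;
}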
The problem with limiting your digit cluster to three digits is that multiplying a three-digit cluster by a value above 1,000 may produce a carry that spills into digit number four.

Although this does not create an immediate problem, the error accumulates as the value is carried up the computation chain. With more than 5,000 digits in the result of 2000!, the carry overflow would certainly produce a visible error in the result; it happens around the 1,258th step of the computation of 2000!.

This does not happen with four-digit clusters and 10,000, because the only place where the carry could "spill" into the next digit is the computation of the very last cluster, and long long has plenty of space to accommodate this without an overflow.
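To see the difference in the carry bounds concretely, here is a small stand-alone simulation (an illustration only; worst_case_carry is a made-up helper, not part of either program). It iterates carry = (n_max * (base - 1) + carry) / base, the largest carry a single multiply step can produce, for both cluster sizes:

#include <stdio.h>

/* Upper bound on the carry after repeatedly multiplying a maximal cluster. */
static long long worst_case_carry(long long base, long long n_max, int steps) {
    long long carry = 0;
    for (int i = 0; i < steps; i++)
        carry = (n_max * (base - 1) + carry) / base;
    return carry;
}

int main(void) {
    /* Three-digit clusters, multipliers up to 2,000. */
    printf("base 1000,  n <= 2000 : worst carry = %lld\n",
           worst_case_carry(1000, 2000, 50));
    /* Four-digit clusters, multipliers up to 10,000. */
    printf("base 10000, n <= 10000: worst carry = %lld\n",
           worst_case_carry(10000, 10000, 50));
    return 0;
}

With three-digit clusters the worst-case carry reaches 1,999, so it no longer fits in a single three-digit cluster; with four-digit clusters and multipliers up to 10,000 it tops out at 9,999 and still fits, which is consistent with the answer above: a three-digit cluster cannot hold its own worst-case carry, while a four-digit cluster can.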