Remove a strange old piece of code apparently for detecting a server gone to lunch
This code appears to be some kind of backstop so that the client will notice a server connection that's died because of a network interruption. Checking errno that was last set christ-knows-where is not the right way to do this, though. The normal write to the server for the lag check should eventually see the host/net unreachable error if this happens.
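For illustration only (not part of this commit): errno carries meaning only immediately after a call that reported failure, so the unreachable-host condition should be caught at the failing write itself. The sketch below shows that idea; the send_lag_check() helper and its PING payload are hypothetical names, not from this codebase.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: send the periodic lag-check line and test errno
 * right where a failing write sets it, instead of consulting a stale
 * value left over from some unrelated earlier call. */
static int send_lag_check(int fd)
{
	static const char ping[] = "PING :lagcheck\r\n";

	if (write(fd, ping, sizeof(ping) - 1) < 0)
	{
		/* errno is valid here, immediately after the failure. */
		if (errno == ENETUNREACH || errno == EHOSTUNREACH)
			fprintf(stderr, "server unreachable: %s\n",
			    strerror(errno));
		return -1;	/* caller can close and reconnect */
	}
	return 0;
}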
@@ -485,7 +485,6 @@ void do_server (fd_set *rd, fd_set *wr)
 	char buffer[BIG_BUFFER_SIZE + 1];
 	int des,
 	    i;
-static time_t last_timeout = 0;
 
 	/* Process server timeouts */
 	do_idle_server();
@@ -603,7 +602,6 @@ static time_t last_timeout = 0;
 			}
 			default:
 			{
-				last_timeout = 0;
 				parsing_server_index = i;
 				server_list[i].last_msg = now;
 				parse_server(buffer);
@@ -615,17 +613,6 @@ static time_t last_timeout = 0;
 			}
 			from_server = primary_server;
 		}
-		if (server_list[i].read != -1 && (errno == ENETUNREACH || errno == EHOSTUNREACH))
-		{
-			if (last_timeout == 0)
-				last_timeout = now;
-			else if (now - last_timeout > 600)
-			{
-				close_server(i, empty_string);
-				server_list[i].reconnecting = 1;
-				get_connected(i, -1);
-			}
-		}
 	}
 
 	if (primary_server == -1 || !is_server_open(primary_server))